var/home/core/zuul-output/logs/kubelet.log
Dec 09 16:56:52 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 09 16:56:53 crc restorecon[4696]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 16:56:53 crc restorecon[4696]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc 
restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 16:56:53 crc 
restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 
16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 16:56:53 crc 
restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 
16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 
16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 16:56:53 crc restorecon[4696]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 16:56:53 crc restorecon[4696]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 09 16:56:54 crc kubenswrapper[4840]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.391184    4840 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.398922    4840 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.398958    4840 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.398993    4840 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399003    4840 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399011    4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399020    4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399028    4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399037    4840 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399046    4840 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399057    4840 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399068    4840 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399076    4840 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399086    4840 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399094    4840 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399103    4840 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399113    4840 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399122    4840 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399130    4840 feature_gate.go:330] unrecognized feature gate: Example
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399139    4840 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399149    4840 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399158    4840 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399166    4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399174    4840 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399181    4840 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399189    4840 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399197    4840 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399225    4840 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399234    4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399242    4840 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399250    4840 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399258    4840 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399266    4840 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399273    4840 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399281    4840 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399289    4840 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399297    4840 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399305    4840 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399313    4840 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399320    4840 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399329    4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399337    4840 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399345    4840 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399352    4840 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399360    4840 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399368    4840 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399381    4840 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399391    4840 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399400    4840 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399408    4840 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399417    4840 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399426    4840 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399435    4840 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399443    4840 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399451    4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399459    4840 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399466    4840 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399475    4840 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399482    4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399491    4840 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399499    4840 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399507    4840 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399515    4840 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399523    4840 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399530    4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399538    4840 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399546    4840 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399553    4840 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399562    4840 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399570    4840 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399577    4840 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.399585    4840 feature_gate.go:330] unrecognized feature gate: 
PersistentIPsForVirtualization Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400055 4840 flags.go:64] FLAG: --address="0.0.0.0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400080 4840 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400097 4840 flags.go:64] FLAG: --anonymous-auth="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400137 4840 flags.go:64] FLAG: --application-metrics-count-limit="100" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400149 4840 flags.go:64] FLAG: --authentication-token-webhook="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400158 4840 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400170 4840 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400181 4840 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400191 4840 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400200 4840 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400211 4840 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400224 4840 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400234 4840 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400243 4840 flags.go:64] FLAG: --cgroup-root="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400252 4840 flags.go:64] FLAG: --cgroups-per-qos="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400261 4840 flags.go:64] FLAG: --client-ca-file="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400270 4840 flags.go:64] FLAG: --cloud-config="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400279 4840 flags.go:64] FLAG: --cloud-provider="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400288 4840 flags.go:64] FLAG: --cluster-dns="[]" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400299 4840 flags.go:64] FLAG: --cluster-domain="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400308 4840 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400318 4840 flags.go:64] FLAG: --config-dir="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400327 4840 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400337 4840 flags.go:64] FLAG: --container-log-max-files="5" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400349 4840 flags.go:64] FLAG: --container-log-max-size="10Mi" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400357 4840 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400368 4840 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400378 4840 flags.go:64] FLAG: --containerd-namespace="k8s.io" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400386 4840 flags.go:64] FLAG: --contention-profiling="false" Dec 09 16:56:54 crc 
kubenswrapper[4840]: I1209 16:56:54.400396 4840 flags.go:64] FLAG: --cpu-cfs-quota="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400404 4840 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400414 4840 flags.go:64] FLAG: --cpu-manager-policy="none" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400423 4840 flags.go:64] FLAG: --cpu-manager-policy-options="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400435 4840 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400444 4840 flags.go:64] FLAG: --enable-controller-attach-detach="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400453 4840 flags.go:64] FLAG: --enable-debugging-handlers="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400461 4840 flags.go:64] FLAG: --enable-load-reader="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400472 4840 flags.go:64] FLAG: --enable-server="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400480 4840 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400494 4840 flags.go:64] FLAG: --event-burst="100" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400503 4840 flags.go:64] FLAG: --event-qps="50" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400512 4840 flags.go:64] FLAG: --event-storage-age-limit="default=0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400523 4840 flags.go:64] FLAG: --event-storage-event-limit="default=0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400533 4840 flags.go:64] FLAG: --eviction-hard="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400543 4840 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400552 4840 flags.go:64] FLAG: --eviction-minimum-reclaim="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400561 4840 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400574 4840 flags.go:64] FLAG: --eviction-soft="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400583 4840 flags.go:64] FLAG: --eviction-soft-grace-period="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400592 4840 flags.go:64] FLAG: --exit-on-lock-contention="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400601 4840 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400610 4840 flags.go:64] FLAG: --experimental-mounter-path="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400619 4840 flags.go:64] FLAG: --fail-cgroupv1="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400628 4840 flags.go:64] FLAG: --fail-swap-on="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400636 4840 flags.go:64] FLAG: --feature-gates="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400659 4840 flags.go:64] FLAG: --file-check-frequency="20s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400668 4840 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400677 4840 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400687 4840 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Dec 09 16:56:54 crc 
kubenswrapper[4840]: I1209 16:56:54.400696 4840 flags.go:64] FLAG: --healthz-port="10248" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400705 4840 flags.go:64] FLAG: --help="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400713 4840 flags.go:64] FLAG: --hostname-override="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400722 4840 flags.go:64] FLAG: --housekeeping-interval="10s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400732 4840 flags.go:64] FLAG: --http-check-frequency="20s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400740 4840 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400749 4840 flags.go:64] FLAG: --image-credential-provider-config="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400757 4840 flags.go:64] FLAG: --image-gc-high-threshold="85" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400766 4840 flags.go:64] FLAG: --image-gc-low-threshold="80" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400775 4840 flags.go:64] FLAG: --image-service-endpoint="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400784 4840 flags.go:64] FLAG: --kernel-memcg-notification="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400792 4840 flags.go:64] FLAG: --kube-api-burst="100" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400802 4840 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400812 4840 flags.go:64] FLAG: --kube-api-qps="50" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400821 4840 flags.go:64] FLAG: --kube-reserved="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400830 4840 flags.go:64] FLAG: --kube-reserved-cgroup="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400838 4840 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400847 4840 flags.go:64] FLAG: --kubelet-cgroups="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400855 4840 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400865 4840 flags.go:64] FLAG: --lock-file="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400874 4840 flags.go:64] FLAG: --log-cadvisor-usage="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400883 4840 flags.go:64] FLAG: --log-flush-frequency="5s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400893 4840 flags.go:64] FLAG: --log-json-info-buffer-size="0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400906 4840 flags.go:64] FLAG: --log-json-split-stream="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400915 4840 flags.go:64] FLAG: --log-text-info-buffer-size="0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400925 4840 flags.go:64] FLAG: --log-text-split-stream="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400935 4840 flags.go:64] FLAG: --logging-format="text" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400943 4840 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400953 4840 flags.go:64] FLAG: --make-iptables-util-chains="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.400986 4840 flags.go:64] FLAG: --manifest-url="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 
16:56:54.400996 4840 flags.go:64] FLAG: --manifest-url-header="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401007 4840 flags.go:64] FLAG: --max-housekeeping-interval="15s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401016 4840 flags.go:64] FLAG: --max-open-files="1000000" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401027 4840 flags.go:64] FLAG: --max-pods="110" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401036 4840 flags.go:64] FLAG: --maximum-dead-containers="-1" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401045 4840 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401054 4840 flags.go:64] FLAG: --memory-manager-policy="None" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401063 4840 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401072 4840 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401081 4840 flags.go:64] FLAG: --node-ip="192.168.126.11" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401090 4840 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401110 4840 flags.go:64] FLAG: --node-status-max-images="50" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401118 4840 flags.go:64] FLAG: --node-status-update-frequency="10s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401128 4840 flags.go:64] FLAG: --oom-score-adj="-999" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401137 4840 flags.go:64] FLAG: --pod-cidr="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401146 4840 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401160 4840 flags.go:64] FLAG: --pod-manifest-path="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401169 4840 flags.go:64] FLAG: --pod-max-pids="-1" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401177 4840 flags.go:64] FLAG: --pods-per-core="0" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401186 4840 flags.go:64] FLAG: --port="10250" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401198 4840 flags.go:64] FLAG: --protect-kernel-defaults="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401206 4840 flags.go:64] FLAG: --provider-id="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401215 4840 flags.go:64] FLAG: --qos-reserved="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401224 4840 flags.go:64] FLAG: --read-only-port="10255" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401233 4840 flags.go:64] FLAG: --register-node="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401243 4840 flags.go:64] FLAG: --register-schedulable="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401252 4840 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401275 4840 flags.go:64] FLAG: --registry-burst="10" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401284 4840 flags.go:64] FLAG: --registry-qps="5" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401293 
4840 flags.go:64] FLAG: --reserved-cpus="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401303 4840 flags.go:64] FLAG: --reserved-memory="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401314 4840 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401323 4840 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401333 4840 flags.go:64] FLAG: --rotate-certificates="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401342 4840 flags.go:64] FLAG: --rotate-server-certificates="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401351 4840 flags.go:64] FLAG: --runonce="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401360 4840 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401369 4840 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401378 4840 flags.go:64] FLAG: --seccomp-default="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401387 4840 flags.go:64] FLAG: --serialize-image-pulls="true" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401396 4840 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401406 4840 flags.go:64] FLAG: --storage-driver-db="cadvisor" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401415 4840 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401425 4840 flags.go:64] FLAG: --storage-driver-password="root" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401434 4840 flags.go:64] FLAG: --storage-driver-secure="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401443 4840 flags.go:64] FLAG: --storage-driver-table="stats" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401453 4840 flags.go:64] FLAG: --storage-driver-user="root" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401462 4840 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401471 4840 flags.go:64] FLAG: --sync-frequency="1m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401481 4840 flags.go:64] FLAG: --system-cgroups="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401489 4840 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401503 4840 flags.go:64] FLAG: --system-reserved-cgroup="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401512 4840 flags.go:64] FLAG: --tls-cert-file="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401521 4840 flags.go:64] FLAG: --tls-cipher-suites="[]" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401531 4840 flags.go:64] FLAG: --tls-min-version="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401540 4840 flags.go:64] FLAG: --tls-private-key-file="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401549 4840 flags.go:64] FLAG: --topology-manager-policy="none" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401567 4840 flags.go:64] FLAG: --topology-manager-policy-options="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401576 4840 flags.go:64] FLAG: --topology-manager-scope="container" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401585 
4840 flags.go:64] FLAG: --v="2" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401596 4840 flags.go:64] FLAG: --version="false" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401607 4840 flags.go:64] FLAG: --vmodule="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401617 4840 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.401629 4840 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401849 4840 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401860 4840 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401870 4840 feature_gate.go:330] unrecognized feature gate: Example Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401879 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401889 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401897 4840 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401905 4840 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401913 4840 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401920 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401929 4840 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401937 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401945 4840 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401952 4840 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401960 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.401993 4840 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402000 4840 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402008 4840 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402016 4840 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402023 4840 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402031 4840 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402042 4840 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402051 4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402061 4840 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402071 4840 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402081 4840 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402094 4840 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402103 4840 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402111 4840 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402118 4840 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402127 4840 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402135 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402144 4840 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402151 4840 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402159 4840 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402167 4840 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402175 4840 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402183 4840 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402191 4840 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402200 4840 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402208 4840 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402216 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402224 4840 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402232 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402240 4840 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402248 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402256 4840 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402264 4840 feature_gate.go:330] 
unrecognized feature gate: PersistentIPsForVirtualization Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402272 4840 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402279 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402287 4840 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402295 4840 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402303 4840 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402311 4840 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402318 4840 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402326 4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402334 4840 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402342 4840 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402353 4840 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402361 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402369 4840 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402377 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402388 4840 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402399 4840 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402409 4840 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402418 4840 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402427 4840 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402436 4840 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402444 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402452 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402460 4840 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.402467 4840 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.402480 4840 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.416013 4840 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.416052 4840 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416222 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416236 4840 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416246 4840 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416254 4840 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416262 4840 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416270 4840 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416278 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416286 4840 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416295 4840 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416302 4840 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416310 4840 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416319 4840 feature_gate.go:330] unrecognized 
feature gate: ManagedBootImagesAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416327 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416336 4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416345 4840 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416357 4840 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416368 4840 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416377 4840 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416386 4840 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416395 4840 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416405 4840 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416416 4840 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416424 4840 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416433 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416441 4840 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416448 4840 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416456 4840 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416464 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416472 4840 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416480 4840 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416487 4840 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416495 4840 feature_gate.go:330] unrecognized feature gate: Example Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416503 4840 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416511 4840 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416522 4840 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416530 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416539 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 09 16:56:54 crc 
kubenswrapper[4840]: W1209 16:56:54.416547 4840 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416555 4840 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416563 4840 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416571 4840 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416579 4840 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416587 4840 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416596 4840 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416604 4840 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416612 4840 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416622 4840 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416632 4840 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416640 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416648 4840 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416656 4840 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416665 4840 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416674 4840 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416681 4840 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416689 4840 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416698 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416706 4840 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416714 4840 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416722 4840 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416729 4840 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416737 4840 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416745 4840 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416752 4840 feature_gate.go:330] unrecognized 
feature gate: MetricsCollectionProfiles Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416760 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416770 4840 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416780 4840 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416789 4840 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416797 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416805 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416814 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.416824 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.416836 4840 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417090 4840 feature_gate.go:330] unrecognized feature gate: Example Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417106 4840 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417115 4840 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417125 4840 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417135 4840 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417143 4840 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417151 4840 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417160 4840 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417170 4840 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417183 4840 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417192 4840 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417200 4840 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417209 4840 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417217 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417225 4840 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417233 4840 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417240 4840 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417248 4840 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417255 4840 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417264 4840 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417272 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417279 4840 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417287 4840 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417295 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417302 4840 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417310 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417318 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417326 4840 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417333 4840 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417341 4840 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417349 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417356 4840 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417366 4840 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417377 4840 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417387 4840 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417396 4840 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417406 4840 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417413 4840 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417421 4840 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417429 4840 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417437 4840 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417445 4840 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417452 4840 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417460 4840 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417468 4840 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417476 4840 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417484 4840 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417491 4840 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417501 4840 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417510 4840 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417518 4840 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417525 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417533 4840 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417541 4840 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417549 4840 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417557 4840 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417565 4840 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417572 4840 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417580 4840 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417588 4840 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417597 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417605 4840 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417612 4840 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417620 4840 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417627 4840 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417635 4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417643 4840 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417651 4840 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417659 4840 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417666 4840 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.417677 4840 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
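The repeated sweeps of feature_gate.go:330 warnings above show the kubelet being handed an OpenShift-wide gate list and skipping, with a warning rather than an error, every name its own binary does not register; the survivors are then printed as the resolved map at feature_gate.go:386. Below is a minimal sketch of that fold, assuming a toy known-gate set sampled from the resolved map; this is not kubelet's actual parser.

```go
// Minimal sketch (not kubelet's real implementation) of folding a
// "Gate1=true,Gate2=false" feature-gate string into a map, warning on
// names the binary does not recognize -- the behavior the repeated
// feature_gate.go:330 lines above reflect.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// known is a small sample taken from the resolved map logged at
// feature_gate.go:386; a real binary registers many more gates.
var known = map[string]bool{
	"CloudDualStackNodeIPs":     true,
	"KMSv1":                     true,
	"ValidatingAdmissionPolicy": true,
	"NodeSwap":                  true,
}

func applyGates(spec string, gates map[string]bool) {
	for _, pair := range strings.Split(spec, ",") {
		name, val, ok := strings.Cut(pair, "=")
		if !ok {
			fmt.Printf("malformed entry %q\n", pair)
			continue
		}
		if !known[name] {
			fmt.Printf("W unrecognized feature gate: %s\n", name) // warn, don't fail
			continue
		}
		b, err := strconv.ParseBool(val)
		if err != nil {
			fmt.Printf("invalid value for %s: %v\n", name, err)
			continue
		}
		gates[name] = b
	}
}

func main() {
	gates := map[string]bool{}
	applyGates("CloudDualStackNodeIPs=true,KMSv1=true,PinnedImages=true", gates)
	fmt.Println("feature gates:", gates)
}
```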
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.417690 4840 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.417917 4840 server.go:940] "Client rotation is on, will bootstrap in background" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.423811 4840 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.423939 4840 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.425210 4840 server.go:997] "Starting client certificate rotation" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.425276 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.425494 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-05 01:12:59.118323825 +0000 UTC Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.425682 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.437417 4840 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.439288 4840 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.441518 4840 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.453953 4840 log.go:25] "Validated CRI v1 runtime API" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.486846 4840 log.go:25] "Validated CRI v1 image API" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.488767 4840 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.492510 4840 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-09-16-51-58-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.492556 4840 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 
fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.521922 4840 manager.go:217] Machine: {Timestamp:2025-12-09 16:56:54.519198464 +0000 UTC m=+0.510309167 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:996aaa93-f1e3-43a6-a427-94b00d03e134 BootID:6d7e6f5e-3bc8-4940-b935-65e21247c851 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:56:c8:6c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:56:c8:6c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:46:f1:f9 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f8:cc:a4 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:fe:a5:83 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:29:14:7b Speed:-1 Mtu:1496} {Name:eth10 MacAddress:02:e5:37:f2:de:73 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:1e:5c:1f:01:87:69 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 
Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.522403 4840 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.522618 4840 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.523318 4840 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.523696 4840 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.523747 4840 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.524165 4840 topology_manager.go:138] "Creating topology manager with none policy" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.524184 4840 container_manager_linux.go:303] "Creating device plugin manager" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.524395 4840 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.524447 4840 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.524863 4840 state_mem.go:36] "Initialized new in-memory state store" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.525042 4840 server.go:1245] "Using root directory" path="/var/lib/kubelet" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.526016 4840 kubelet.go:418] "Attempting to sync node with API server" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.526048 4840 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.526134 4840 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.526162 4840 kubelet.go:324] "Adding apiserver pod source" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.526185 4840 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.528469 4840 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.529010 4840 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.531508 4840 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532619 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532665 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532688 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532702 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532737 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532752 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532774 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532819 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532850 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532866 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532918 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.532949 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.533199 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.533228 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.533333 4840 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.533340 4840 reflector.go:158] "Unhandled 
Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.533344 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.534213 4840 server.go:1280] "Started kubelet" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.535030 4840 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.535027 4840 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.536640 4840 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:54 crc systemd[1]: Started Kubernetes Kubelet. Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.537134 4840 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.537165 4840 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.537341 4840 volume_manager.go:287] "The desired_state_of_world populator starts" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.537369 4840 volume_manager.go:289] "Starting Kubelet Volume Manager" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.537505 4840 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.537596 4840 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.538126 4840 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 19:36:37.130050698 +0000 UTC Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.538191 4840 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 530h39m42.591866206s for next certificate rotation Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.539565 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.539574 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="200ms" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.539685 4840 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.539345 4840 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.204:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187f9a76ed8366ad default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 16:56:54.534153901 +0000 UTC m=+0.525264604,LastTimestamp:2025-12-09 16:56:54.534153901 +0000 UTC m=+0.525264604,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.540282 4840 factory.go:55] Registering systemd factory Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.540308 4840 factory.go:221] Registration of the systemd container factory successfully Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.540586 4840 server.go:460] "Adding debug handlers to kubelet server" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.538699 4840 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.544601 4840 factory.go:153] Registering CRI-O factory Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.544644 4840 factory.go:221] Registration of the crio container factory successfully Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.545024 4840 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.545322 4840 factory.go:103] Registering Raw factory Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.545396 4840 manager.go:1196] Started watching for new ooms in manager Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.553719 4840 manager.go:319] Starting recovery of all containers Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557602 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557678 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557700 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557717 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557767 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557786 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557812 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557837 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557863 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557879 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557898 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557916 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557932 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557951 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.557994 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558014 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558030 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558047 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558063 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558080 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558095 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558146 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558166 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558182 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558199 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558217 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558248 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558266 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558284 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558300 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558320 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558336 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558405 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558427 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558444 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558460 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558505 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558525 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558550 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558567 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558585 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558601 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558619 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558636 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558653 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558670 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558701 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558719 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558739 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558756 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558779 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558795 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558824 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558844 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558862 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558879 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558899 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.558984 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559007 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559027 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559045 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559061 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559081 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559097 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559114 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559130 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559154 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559172 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559190 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559209 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559225 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559254 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559271 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559286 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559302 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559319 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559334 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559352 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559373 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559390 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559406 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559433 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559457 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559475 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559491 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559507 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559532 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559549 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559565 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559581 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559596 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559613 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559699 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559717 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559738 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559755 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559771 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559790 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559808 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559827 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559845 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559871 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559897 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559915 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.559984 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560008 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560026 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560045 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560814 4840 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560858 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560884 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560902 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560920 4840 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560940 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.560958 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561047 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561065 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561081 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561099 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561134 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561154 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561170 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561187 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561203 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561221 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561237 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561256 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561274 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561293 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561310 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561339 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561357 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561374 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561394 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561422 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561440 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561457 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561473 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561492 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561511 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561527 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561545 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561565 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561584 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561603 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561620 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561637 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561656 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561684 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561701 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561719 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561736 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561759 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561827 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561852 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561874 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561891 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561907 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561927 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561946 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.561985 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562004 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562023 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562043 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562059 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562075 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562093 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562111 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562127 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562144 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562160 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562177 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562196 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562214 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562235 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562253 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562271 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562289 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562309 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562329 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562346 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562363 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562381 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562397 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562414 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562431 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562448 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562465 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562482 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562499 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562518 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562535 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562552 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562569 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562589 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562606 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562623 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562640 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562656 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562673 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562689 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562704 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562722 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562739 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562755 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562773 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562801 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562819 4840 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562842 4840 reconstruct.go:97] "Volume reconstruction finished" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.562854 4840 reconciler.go:26] "Reconciler: start to sync state" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.581120 4840 manager.go:324] Recovery completed Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.597014 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.598379 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.598416 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.598430 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.599541 4840 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 
16:56:54.599562 4840 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.599584 4840 state_mem.go:36] "Initialized new in-memory state store"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.604452 4840 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.607194 4840 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.607270 4840 status_manager.go:217] "Starting to sync pod status with apiserver"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.607303 4840 kubelet.go:2335] "Starting kubelet main sync loop"
Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.607383 4840 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.613045 4840 policy_none.go:49] "None policy: Start"
Dec 09 16:56:54 crc kubenswrapper[4840]: W1209 16:56:54.613387 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused
Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.613518 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.614944 4840 memory_manager.go:170] "Starting memorymanager" policy="None"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.615023 4840 state_mem.go:35] "Initializing new in-memory state store"
Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.638482 4840 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.676467 4840 manager.go:334] "Starting Device Plugin manager"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.676529 4840 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.676546 4840 server.go:79] "Starting device plugin registration server"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.677004 4840 eviction_manager.go:189] "Eviction manager: starting control loop"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.677025 4840 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.677242 4840 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.677314 4840 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.677320 4840 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.683411 4840 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.707493 4840 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"]
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.707575 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.708432 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.708469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.708480 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.708586 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.709154 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.709181 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.709190 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.710323 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.710365 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.710390 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.710551 4840 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.710636 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711242 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711269 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711278 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711522 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711551 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711670 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711734 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.711752 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712407 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712458 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712478 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712764 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712788 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.712798 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.713269 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.713305 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.713321 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.713505 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc 
kubenswrapper[4840]: I1209 16:56:54.714264 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714312 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714376 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714395 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714410 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714544 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.714574 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715006 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715045 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715626 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715663 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.715680 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.741097 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="400ms" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.764822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.764871 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.764922 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.764946 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.764997 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765022 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765046 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765070 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765093 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765115 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765270 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765314 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765345 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765369 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.765392 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.778000 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.779392 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.779425 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.779438 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.779466 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.779952 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.204:6443: connect: connection refused" node="crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866317 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866385 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866425 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866460 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866496 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866532 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866570 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866588 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866596 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866622 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866629 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866559 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866632 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: 
I1209 16:56:54.866697 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866697 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866737 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866777 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866799 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866738 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866823 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866844 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866883 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866890 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866915 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866948 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.866948 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.867021 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.867029 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.867110 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.867142 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.980292 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.981768 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.981835 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.981853 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:54 crc kubenswrapper[4840]: I1209 16:56:54.981895 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:56:54 crc kubenswrapper[4840]: E1209 16:56:54.982561 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.204:6443: connect: connection refused" node="crc" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.052379 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.061896 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.092064 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.097080 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-107c7e9d753efd3aac9a7f366fd77943eb29daa0f6735c045b5765ca8e70cf62 WatchSource:0}: Error finding container 107c7e9d753efd3aac9a7f366fd77943eb29daa0f6735c045b5765ca8e70cf62: Status 404 returned error can't find the container with id 107c7e9d753efd3aac9a7f366fd77943eb29daa0f6735c045b5765ca8e70cf62 Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.102332 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-35fbdd245f35b589424c5148325c022b419a1ccafff4d8951042e0319318c3c0 WatchSource:0}: Error finding container 35fbdd245f35b589424c5148325c022b419a1ccafff4d8951042e0319318c3c0: Status 404 returned error can't find the container with id 35fbdd245f35b589424c5148325c022b419a1ccafff4d8951042e0319318c3c0 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.114782 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.116639 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-30767bef134741ec00b2009f4b6e783e8d6cd3654216adfba4cada0d4882e65b WatchSource:0}: Error finding container 30767bef134741ec00b2009f4b6e783e8d6cd3654216adfba4cada0d4882e65b: Status 404 returned error can't find the container with id 30767bef134741ec00b2009f4b6e783e8d6cd3654216adfba4cada0d4882e65b Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.120152 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.140543 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-33ca21da5143c54b5090d91999f41d12f9cc859d1f9ddb162144df98d9292bd0 WatchSource:0}: Error finding container 33ca21da5143c54b5090d91999f41d12f9cc859d1f9ddb162144df98d9292bd0: Status 404 returned error can't find the container with id 33ca21da5143c54b5090d91999f41d12f9cc859d1f9ddb162144df98d9292bd0 Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.142236 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="800ms" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.155072 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-06412a04f3a481a0b5396a160c14b447b5313b4b67c42cb19dd3b3ab9b6dd409 WatchSource:0}: Error finding container 06412a04f3a481a0b5396a160c14b447b5313b4b67c42cb19dd3b3ab9b6dd409: Status 404 returned error can't find the container with id 06412a04f3a481a0b5396a160c14b447b5313b4b67c42cb19dd3b3ab9b6dd409 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.383064 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.384364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.384419 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.384434 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.384468 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.384946 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.204:6443: connect: connection refused" node="crc" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.538248 4840 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.611855 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75" exitCode=0 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.611916 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.611995 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"06412a04f3a481a0b5396a160c14b447b5313b4b67c42cb19dd3b3ab9b6dd409"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.612074 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613142 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613164 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613172 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613530 4840 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fd16f434034e9eb9eed8184a8b297de0b834407afb41e82886077f969e3c3fc5" exitCode=0 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613579 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fd16f434034e9eb9eed8184a8b297de0b834407afb41e82886077f969e3c3fc5"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613599 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"33ca21da5143c54b5090d91999f41d12f9cc859d1f9ddb162144df98d9292bd0"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.613682 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614172 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614479 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614496 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614502 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614602 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614620 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.614630 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.615386 4840 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="bf0583e14a5da6b278cd9ca7806db8b6798b5fc43a68c381bf2beceb0d7a8458" exitCode=0 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.615422 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"bf0583e14a5da6b278cd9ca7806db8b6798b5fc43a68c381bf2beceb0d7a8458"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.615436 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"30767bef134741ec00b2009f4b6e783e8d6cd3654216adfba4cada0d4882e65b"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.615480 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.618821 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.618892 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.618903 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.620336 4840 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054" exitCode=0 Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.620417 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.620463 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"107c7e9d753efd3aac9a7f366fd77943eb29daa0f6735c045b5765ca8e70cf62"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.620541 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.621226 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.621249 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.621258 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.623127 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012"} Dec 09 16:56:55 crc kubenswrapper[4840]: I1209 16:56:55.623152 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"35fbdd245f35b589424c5148325c022b419a1ccafff4d8951042e0319318c3c0"} Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.669533 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list 
*v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.669625 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.772628 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.772714 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.855387 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.855496 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:55 crc kubenswrapper[4840]: W1209 16:56:55.918099 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.918207 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:55 crc kubenswrapper[4840]: E1209 16:56:55.944553 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="1.6s" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.185174 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.186609 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.186710 4840 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.186776 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.186914 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:56:56 crc kubenswrapper[4840]: E1209 16:56:56.188093 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.204:6443: connect: connection refused" node="crc" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.537983 4840 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.204:6443: connect: connection refused Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.608389 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 09 16:56:56 crc kubenswrapper[4840]: E1209 16:56:56.609454 4840 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.204:6443: connect: connection refused" logger="UnhandledError" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.632174 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b1eb414a69ff10b311d00e730999ba0b396da4bcb168868c35818b561b86fb59"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.632321 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.633206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.633239 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.633252 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.634333 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.634359 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.634373 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.634442 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.635046 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.635070 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.635081 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637305 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637329 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637333 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637348 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637929 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637950 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.637973 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.640012 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.640033 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.640045 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.640056 4840 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.641496 4840 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="52e84e899320e8467339307bbaaa0f432bb42c32b89feb0056d992db04bafb74" exitCode=0 Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.641525 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"52e84e899320e8467339307bbaaa0f432bb42c32b89feb0056d992db04bafb74"} Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.641593 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.642136 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.642159 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:56 crc kubenswrapper[4840]: I1209 16:56:56.642167 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.648939 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8"} Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.649083 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.651052 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.651121 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.651146 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.652583 4840 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="35c0fd90889fb10eb2e75e8d254ad2254b9379d565bcdc31d0622ba14157e6d4" exitCode=0 Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.652717 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"35c0fd90889fb10eb2e75e8d254ad2254b9379d565bcdc31d0622ba14157e6d4"} Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.652933 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.653247 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654613 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654685 
4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654712 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654846 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654908 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.654928 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.788288 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.789924 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.790021 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.790048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:57 crc kubenswrapper[4840]: I1209 16:56:57.790100 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.662720 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1728baeea5925b5f04bbaccce5c4851092c8455ec1fd7bffccc75bc5ba3008ca"} Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.662786 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"de5b7c3d98084e4e6753b27f29cca0bff8b2274f16532bd02eea5f92668c710e"} Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.662805 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"937cb2db4996719fa00b2c3951cb389bab4640e9e5b54f09f40fbcf50a5832c2"} Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.662828 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.662790 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.664914 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.664957 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.664993 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.755325 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.755583 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.757177 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.757239 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.757264 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:58 crc kubenswrapper[4840]: I1209 16:56:58.989362 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.672804 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"eb0c11c1e35bd3fe2cb2fbb04248b3b1b68f6ce041cca0906526cdde9810608a"} Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.672892 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.672998 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.672907 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4fbbc5dd789e82e2c51bdfedfee02456f875361fbf53a6945d58957beeac5305"} Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674471 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674545 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674569 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674483 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674647 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.674665 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.892904 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.893237 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.895180 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.895246 4840 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 09 16:56:59 crc kubenswrapper[4840]: I1209 16:56:59.895274 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.675326 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.675330 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.676887 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.676948 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.677004 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.677025 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.677062 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.677074 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.732899 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 09 16:57:00 crc kubenswrapper[4840]: I1209 16:57:00.738690 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.480308 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.480531 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.482382 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.482546 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.482576 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.488223 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.677580 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.677629 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.679431 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:01 crc 
kubenswrapper[4840]: I1209 16:57:01.679502 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.679520 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.679437 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.679572 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.679584 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:01 crc kubenswrapper[4840]: I1209 16:57:01.756701 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.354747 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.355043 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.356407 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.356452 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.356469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.679784 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.680816 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.680888 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:02 crc kubenswrapper[4840]: I1209 16:57:02.680906 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:03 crc kubenswrapper[4840]: I1209 16:57:03.729276 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Dec 09 16:57:03 crc kubenswrapper[4840]: I1209 16:57:03.729468 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:03 crc kubenswrapper[4840]: I1209 16:57:03.733601 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:03 crc kubenswrapper[4840]: I1209 16:57:03.733652 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:03 crc kubenswrapper[4840]: I1209 16:57:03.733669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:04 crc kubenswrapper[4840]: E1209 16:57:04.683573 4840 eviction_manager.go:285] "Eviction manager: failed to get summary 
stats" err="failed to get node info: node \"crc\" not found" Dec 09 16:57:04 crc kubenswrapper[4840]: I1209 16:57:04.757167 4840 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 16:57:04 crc kubenswrapper[4840]: I1209 16:57:04.758207 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.781884 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.782117 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.783572 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.783636 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.783654 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:05 crc kubenswrapper[4840]: I1209 16:57:05.789628 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.024618 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.024713 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.169526 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.169960 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 09 16:57:06 crc 
kubenswrapper[4840]: I1209 16:57:06.692310 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.693496 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.693635 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:06 crc kubenswrapper[4840]: I1209 16:57:06.693730 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.539023 4840 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Dec 09 16:57:07 crc kubenswrapper[4840]: E1209 16:57:07.546048 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Dec 09 16:57:07 crc kubenswrapper[4840]: W1209 16:57:07.668166 4840 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.668288 4840 trace.go:236] Trace[1798612174]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 16:56:57.667) (total time: 10000ms): Dec 09 16:57:07 crc kubenswrapper[4840]: Trace[1798612174]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (16:57:07.668) Dec 09 16:57:07 crc kubenswrapper[4840]: Trace[1798612174]: [10.000887772s] [10.000887772s] END Dec 09 16:57:07 crc kubenswrapper[4840]: E1209 16:57:07.668316 4840 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Dec 09 16:57:07 crc kubenswrapper[4840]: E1209 16:57:07.791644 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.935788 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.935853 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with 
statuscode: 403" Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.943567 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 09 16:57:07 crc kubenswrapper[4840]: I1209 16:57:07.943753 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.747345 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.747603 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.749253 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.749303 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.749321 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.754868 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.992188 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.994010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.994069 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.994089 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:10 crc kubenswrapper[4840]: I1209 16:57:10.994128 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:57:10 crc kubenswrapper[4840]: E1209 16:57:10.998914 4840 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 09 16:57:11 crc kubenswrapper[4840]: I1209 16:57:11.705497 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:11 crc kubenswrapper[4840]: I1209 16:57:11.706820 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:11 crc kubenswrapper[4840]: I1209 16:57:11.706872 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:11 crc kubenswrapper[4840]: I1209 16:57:11.706892 4840 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:11 crc kubenswrapper[4840]: I1209 16:57:11.895268 4840 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.391009 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.391247 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.392686 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.392767 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.392788 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.408277 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.709798 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.711002 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.711073 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.711100 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.936017 4840 trace.go:236] Trace[1947701716]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 16:56:58.608) (total time: 14327ms): Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1947701716]: ---"Objects listed" error: 14327ms (16:57:12.935) Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1947701716]: [14.32728846s] [14.32728846s] END Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.936049 4840 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.936427 4840 trace.go:236] Trace[1270928226]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 16:56:58.058) (total time: 14877ms): Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1270928226]: ---"Objects listed" error: 14877ms (16:57:12.936) Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1270928226]: [14.8778894s] [14.8778894s] END Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.936452 4840 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.939500 4840 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.940881 4840 trace.go:236] Trace[1412759269]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 16:56:57.923) (total time: 15017ms): Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1412759269]: ---"Objects listed" error: 15017ms 
(16:57:12.940) Dec 09 16:57:12 crc kubenswrapper[4840]: Trace[1412759269]: [15.017766957s] [15.017766957s] END Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.940936 4840 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.948407 4840 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.993735 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:44890->192.168.126.11:17697: read: connection reset by peer" start-of-body= Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.993820 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:44890->192.168.126.11:17697: read: connection reset by peer" Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.994311 4840 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 09 16:57:12 crc kubenswrapper[4840]: I1209 16:57:12.994383 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.542030 4840 apiserver.go:52] "Watching apiserver" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.546234 4840 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.546732 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.547282 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.547382 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.547486 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.547518 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.547958 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.548181 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.548613 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.548660 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.548782 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.550729 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.551051 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.551281 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.552641 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.553221 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.553279 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.554832 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.555854 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.556130 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.593364 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.609463 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.621910 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.636589 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.639467 4840 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.644203 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.644748 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645021 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645093 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645131 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645165 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645227 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645260 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645290 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645321 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645353 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645384 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: 
\"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645414 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645445 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645590 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645627 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645629 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645658 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645692 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645698 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645724 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645754 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645792 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645833 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645865 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.645898 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646013 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646084 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646091 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646152 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646185 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646200 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646219 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646263 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646340 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646401 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646425 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646417 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646443 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646501 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.646537 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:57:14.146514397 +0000 UTC m=+20.137625140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646600 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646634 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646667 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646722 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646743 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646771 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646824 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646875 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646925 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647004 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647051 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647100 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647154 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647206 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647258 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647313 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647362 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647414 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647459 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647508 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647562 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647611 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647657 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647706 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647754 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647804 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647854 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647902 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647952 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648036 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648086 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 
16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648137 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648182 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648238 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648324 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648377 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648418 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648465 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648507 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648560 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648611 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc 
kubenswrapper[4840]: I1209 16:57:13.648656 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648701 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648750 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648807 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648856 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648903 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648949 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649029 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649079 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649127 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: 
\"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649172 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649229 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649282 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649331 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649382 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649430 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649487 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649537 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649590 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649632 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649668 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649703 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649739 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649776 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649813 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649849 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649883 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649920 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649954 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650251 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650330 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650379 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650422 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650464 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650513 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650554 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650593 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650633 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650669 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650707 4840 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650825 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650861 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650900 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650953 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651041 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651096 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651151 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651204 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651253 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651309 4840 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651370 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651411 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651458 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651508 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651565 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651615 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651655 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651696 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651734 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 
16:57:13.651771 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651809 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651846 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651883 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651917 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651953 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652024 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652064 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652102 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652141 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: 
\"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652179 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652215 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652252 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652330 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652365 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652399 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652434 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652471 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652507 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652543 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652606 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652645 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652680 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652716 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652754 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652793 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652826 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652863 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652900 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652943 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653006 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653043 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653079 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.646757 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653132 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647034 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647065 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647392 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647403 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647617 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647770 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.647752 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648214 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648218 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648507 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.648990 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649323 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649429 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.649428 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650608 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.650668 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651159 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651490 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651670 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651779 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.651909 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652219 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652489 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652508 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.652871 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653008 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653092 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653268 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653654 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654046 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654272 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654414 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654718 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.653118 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654754 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654716 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654824 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654850 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654870 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654915 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.654953 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655059 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655096 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655278 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655285 
4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655313 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655346 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655384 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655417 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655450 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655488 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655522 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.655558 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656010 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656040 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656072 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656114 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656150 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656171 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656195 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656219 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656228 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656294 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656358 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656413 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656459 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656497 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656541 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656583 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656605 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656621 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656652 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.656914 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657118 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657136 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657152 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657463 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657549 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657858 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657958 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657819 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.657900 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.658206 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.658560 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.658889 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.659091 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.659256 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.659410 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.659692 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.659864 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660185 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660232 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660388 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660636 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660688 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660864 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.660927 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.661480 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.661499 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.661685 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.662120 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.662262 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.662659 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.662837 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.662861 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.663378 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.663432 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.663610 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.663912 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.663941 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.664989 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.665351 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.666467 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.666546 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.666612 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.666939 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.666941 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667055 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667113 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667245 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667467 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667480 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667612 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667629 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.667849 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668160 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668035 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668163 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668293 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668766 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668917 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.668929 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.669316 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.669459 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.669495 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.670197 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.670398 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.670415 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.670781 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.671069 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.671411 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.671767 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.671960 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.672076 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.672554 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.672659 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.672757 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.672933 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.673430 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.673680 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674184 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674196 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674483 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674627 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674739 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.674598 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675109 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675130 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675171 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675206 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675236 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675285 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675328 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675372 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675413 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675430 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675449 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675499 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675543 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675577 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675618 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675652 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675690 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675731 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 
16:57:13.675766 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675536 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675800 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675942 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.675954 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676244 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676289 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676318 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676338 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676360 4840 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676400 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676426 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676458 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676483 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676507 4840 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676531 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676560 4840 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676586 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676609 4840 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node 
\"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676629 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676649 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676667 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676627 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676685 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676705 4840 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676725 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676743 4840 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676763 4840 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676794 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676814 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676834 4840 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676854 4840 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676874 4840 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676893 4840 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676912 4840 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676930 4840 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.676948 4840 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677004 4840 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677024 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677041 4840 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677187 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677060 4840 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677703 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677721 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677729 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677784 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677809 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677835 4840 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677854 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677873 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.677891 4840 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678020 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678076 4840 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678107 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678127 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678146 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678164 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678183 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678201 4840 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678220 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678238 4840 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678256 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678275 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678297 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678316 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678336 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678355 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678372 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678391 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678411 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678432 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678452 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678471 4840 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678490 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678628 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678653 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678744 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678763 4840 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678913 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678912 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678938 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678961 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679003 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.678916 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679023 4840 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679073 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679098 4840 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679121 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679184 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679203 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679249 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679222 4840 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679323 4840 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679331 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679343 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679387 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679443 4840 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679469 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679487 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679504 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679521 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679590 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679610 4840 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679627 4840 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679645 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679662 4840 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679681 4840 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679700 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679802 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679819 4840 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679835 4840 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679850 4840 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679866 4840 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679884 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679905 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679922 4840 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679939 4840 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.679955 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680004 4840 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680027 4840 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" 
(UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680043 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680061 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680077 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680094 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680111 4840 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680130 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680153 4840 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680204 4840 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680221 4840 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680239 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680256 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680273 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: 
I1209 16:57:13.680291 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680308 4840 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680324 4840 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680341 4840 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680359 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680377 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680396 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680414 4840 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680433 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680450 4840 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680466 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680483 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680513 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680531 4840 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" 
(UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680551 4840 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680568 4840 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680586 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680602 4840 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680597 4840 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680620 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684308 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684328 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684347 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684380 4840 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684402 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680120 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680140 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680147 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680268 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680457 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680627 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.680794 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681438 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684483 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681477 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684668 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681581 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681802 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681812 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681873 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.681928 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.682234 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.682292 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.682613 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.682662 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683070 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683062 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683264 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683568 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683826 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.683925 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.684135 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684861 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.684287 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.684904 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:14.184882345 +0000 UTC m=+20.175992988 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684363 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.684619 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.685027 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:14.184952327 +0000 UTC m=+20.176063070 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.685101 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.685761 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.686172 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.691227 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.701732 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.706036 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.707483 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.707774 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.709321 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.709590 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.709615 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.709631 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.709698 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:14.209675688 +0000 UTC m=+20.200786331 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.709720 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.710106 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.710151 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.710172 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:13 crc kubenswrapper[4840]: E1209 16:57:13.710257 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:14.210225344 +0000 UTC m=+20.201336017 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.711720 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.711995 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.712021 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.712443 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.713198 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.714227 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.715106 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.716583 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.716645 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.717088 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.719067 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8" exitCode=255 Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.719110 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8"} Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.722268 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.724725 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.730341 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.730678 4840 scope.go:117] "RemoveContainer" containerID="efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.732320 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.735366 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.752040 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.757230 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.764491 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.775011 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.784295 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.784931 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785034 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785177 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785298 4840 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785391 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785954 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786109 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786204 4840 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786276 4840 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786358 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786434 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.785343 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786511 4840 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786650 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786725 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786835 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.786926 4840 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787018 4840 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787092 4840 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787172 4840 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787245 4840 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node 
\"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787373 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787453 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787527 4840 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787596 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787670 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787743 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787843 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.787914 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788009 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788082 4840 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788150 4840 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788233 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788313 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788386 4840 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788455 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788526 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788591 4840 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788661 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788733 4840 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788796 4840 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788864 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.788933 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789022 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789082 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789138 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789194 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789284 4840 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" 
(UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789351 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789405 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789467 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789521 4840 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789572 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789625 4840 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789678 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.789736 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.794640 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.872389 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.885494 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.891468 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.898188 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.898255 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.901672 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.912013 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: W1209 16:57:13.921772 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-9e3b6bf2fb10dbaa96c4f41c5cb041ed6d8b9f32e3c19f26828704ae4d6ccd1b WatchSource:0}: Error finding container 9e3b6bf2fb10dbaa96c4f41c5cb041ed6d8b9f32e3c19f26828704ae4d6ccd1b: Status 404 returned error can't find the container with id 9e3b6bf2fb10dbaa96c4f41c5cb041ed6d8b9f32e3c19f26828704ae4d6ccd1b Dec 09 16:57:13 crc kubenswrapper[4840]: W1209 16:57:13.931085 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-156f532956d681c159b0df7dc6e9418b4d7a31ebe6aec0e8b3939f1a53635422 WatchSource:0}: Error finding container 156f532956d681c159b0df7dc6e9418b4d7a31ebe6aec0e8b3939f1a53635422: Status 404 returned error 
can't find the container with id 156f532956d681c159b0df7dc6e9418b4d7a31ebe6aec0e8b3939f1a53635422 Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.935797 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.957387 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.978747 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:13 crc kubenswrapper[4840]: I1209 16:57:13.990396 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.004526 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.015877 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.028084 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09
T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.039310 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.050074 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.063643 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\
"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.078834 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.092480 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.104202 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.115948 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.192040 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.192113 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.192143 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.192252 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.192308 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:15.192291026 +0000 UTC m=+21.183401669 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.192634 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-09 16:57:15.192623545 +0000 UTC m=+21.183734178 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.192711 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.192744 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:15.192734489 +0000 UTC m=+21.183845122 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.292739 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.292789 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292896 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292912 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292924 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292920 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292960 
4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292992 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:15.292978511 +0000 UTC m=+21.284089144 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.292995 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.293050 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:15.293035072 +0000 UTC m=+21.284145705 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.613035 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.613866 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.614582 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.615862 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.616463 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.617421 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.618036 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.618583 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.619708 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.620346 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.621313 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.622037 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.623015 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.623540 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.624535 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.625100 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.625678 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.626543 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.627500 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.629537 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.630024 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.630575 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.631320 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.631912 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.632651 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.634834 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.636305 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.638117 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.639117 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.642377 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.643376 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.644314 4840 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.644442 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.645844 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.646345 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.646747 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.647938 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.648614 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.650070 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.651574 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.654099 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.654654 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.655304 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.658133 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.659493 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.661785 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.662745 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.665187 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.666395 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.672477 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.674385 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.675506 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.676305 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.678705 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.682314 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.683594 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.684327 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.703951 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.723413 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.723478 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"7e41416f90bb71d42d4fdc3aa0796616af2635e564f8834eda19cf8f1fc08acd"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.726178 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.728425 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.728793 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.729994 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"156f532956d681c159b0df7dc6e9418b4d7a31ebe6aec0e8b3939f1a53635422"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.732956 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.733041 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.733077 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9e3b6bf2fb10dbaa96c4f41c5cb041ed6d8b9f32e3c19f26828704ae4d6ccd1b"} Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.734873 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\
\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: E1209 16:57:14.740062 4840 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.782804 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-che
ck-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.805466 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.824944 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.838531 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.851454 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.865728 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.878088 4840 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.889771 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.900763 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.918890 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:14 crc kubenswrapper[4840]: I1209 16:57:14.937439 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.202262 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.202368 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.202447 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:57:17.2024192 +0000 UTC m=+23.193529853 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.202496 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.202550 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.202555 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:17.202539513 +0000 UTC m=+23.193650166 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.202605 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:17.202592785 +0000 UTC m=+23.193703438 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.202495 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.303708 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.303776 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.303910 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.303929 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.303941 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.304006 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:17.30399175 +0000 UTC m=+23.295102383 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.304329 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.304346 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.304358 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.304388 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:17.304377792 +0000 UTC m=+23.295488425 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.607870 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.607921 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:15 crc kubenswrapper[4840]: I1209 16:57:15.607935 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.608034 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.608173 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:15 crc kubenswrapper[4840]: E1209 16:57:15.608272 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.743557 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d"} Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.764437 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{
\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.784569 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.802835 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.817944 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.848350 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.879683 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.892589 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:16 crc kubenswrapper[4840]: I1209 16:57:16.905353 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:16Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.221227 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:57:21.221191285 +0000 UTC m=+27.212301958 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.221062 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.221650 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.221760 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:21.221736491 +0000 UTC m=+27.212847174 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.221463 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.222435 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.222629 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.222737 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:21.22271343 +0000 UTC m=+27.213824103 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.323213 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.323482 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323417 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323673 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323610 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323775 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323792 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323855 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:21.323834857 +0000 UTC m=+27.314945500 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.323746 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.324051 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:21.324039363 +0000 UTC m=+27.315149986 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.399185 4840 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.401341 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.401377 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.401388 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.401440 4840 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.409496 4840 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.409757 4840 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.410744 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.410789 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.410800 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.410815 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.410826 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.430180 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:17Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.433871 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.433929 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.433942 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.433981 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.433996 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.452527 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:17Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.456858 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.456926 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
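The repeated patch failures above all terminate in the same TLS error: the serving certificate behind the node.network-node-identity.openshift.io webhook expired on 2025-08-24, well before the node's current clock time of 2025-12-09. A minimal Go sketch of how one might inspect that certificate from the node, assuming only the endpoint shown in the log (127.0.0.1:9743); the program is illustrative, not part of any OpenShift tooling:

// certcheck: print the validity window of the webhook serving certificate.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// InsecureSkipVerify lets us fetch the certificate even though it has
	// expired; we are inspecting it, not trusting it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Printf("subject:   %s\n", leaf.Subject)
	fmt.Printf("notBefore: %s\nnotAfter:  %s\n", leaf.NotBefore, leaf.NotAfter)
	if now.After(leaf.NotAfter) {
		// Matches the kubelet error: "current time ... is after ..."
		fmt.Println("certificate expired", now.Sub(leaf.NotAfter), "ago")
	}
}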
event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.456947 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.457000 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.457029 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.471369 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:17Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.475377 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.475422 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
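Each status sync makes a bounded number of patch attempts before giving up: the log shows five "will retry" entries between 16:57:17.43 and 16:57:17.51, then a terminal "update node status exceeds retry count" further below. A minimal sketch of that control flow, where the constant name mirrors the kubelet's nodeStatusUpdateRetry but patchNodeStatus is a hypothetical stand-in for the real PATCH call:

// retryshape: the bounded-retry pattern visible in this log.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // matches the five attempts in the log

// patchNodeStatus stands in for the PATCH that the admission webhook rejects.
func patchNodeStatus() error {
	return errors.New("failed calling webhook: x509: certificate has expired")
}

func tryUpdateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Println("Error updating node status, will retry:", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := tryUpdateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}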
event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.475432 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.475447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.475458 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.489393 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:17Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.493927 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.493981 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
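Independent of the webhook failure, the Ready condition itself is False because the kubelet reports no CNI configuration file in /etc/kubernetes/cni/net.d/. A small Go sketch that performs the equivalent check; the accepted extensions (.conf, .conflist, .json) follow common libcni/ocicni conventions, and the rest is illustrative:

// cnicheck: look for CNI network configuration where the kubelet expects it.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path taken from the kubelet message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json": // extensions libcni conventionally accepts
			fmt.Println("CNI config:", filepath.Join(dir, e.Name()))
			found = true
		}
	}
	if !found {
		// This is the state the kubelet reports as NetworkPluginNotReady.
		fmt.Println("no CNI configuration file found; network plugin not ready")
	}
}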
event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.493991 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.494005 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.494015 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.508597 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:17Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.508781 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.510424 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.510459 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.510471 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.510488 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.510499 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.595239 4840 csr.go:261] certificate signing request csr-95kts is approved, waiting to be issued Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.608466 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.608519 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.608473 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.608605 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.608686 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:17 crc kubenswrapper[4840]: E1209 16:57:17.608815 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.612877 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.612922 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.612933 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.612950 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.612976 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.613150 4840 csr.go:257] certificate signing request csr-95kts is issued Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.714991 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.715024 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.715034 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.715048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.715057 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.817469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.818007 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.818069 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.818151 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.818220 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.920539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.920585 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.920595 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.920611 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:17 crc kubenswrapper[4840]: I1209 16:57:17.920620 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:17Z","lastTransitionTime":"2025-12-09T16:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.023186 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.023230 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.023241 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.023256 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.023268 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.083111 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-tdmlx"] Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.083452 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.083552 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-n2cr9"] Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.083978 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.086743 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087137 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087304 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087369 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087451 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087690 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087822 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.087931 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.100186 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.114674 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.125242 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.125280 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.125290 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.125308 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.125322 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130131 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130237 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f552c10c-4afe-437f-88f7-09946da0d260-hosts-file\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130473 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-system-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130504 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-netns\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130526 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twbtv\" (UniqueName: \"kubernetes.io/projected/f552c10c-4afe-437f-88f7-09946da0d260-kube-api-access-twbtv\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130542 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-os-release\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130590 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cnibin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130606 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-conf-dir\") pod \"multus-n2cr9\" (UID: 
\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130621 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-etc-kubernetes\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130655 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130670 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-daemon-config\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130683 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vtlb\" (UniqueName: \"kubernetes.io/projected/9c465ec1-5011-46d7-bcf3-df79d8b4543b-kube-api-access-2vtlb\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130702 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-bin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130715 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-multus\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130729 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-socket-dir-parent\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130744 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-k8s-cni-cncf-io\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130759 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-hostroot\") pod \"multus-n2cr9\" (UID: 
\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130792 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cni-binary-copy\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130806 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-kubelet\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.130821 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-multus-certs\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.141390 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.155001 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.169363 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.182644 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.211453 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.227266 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.227306 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.227315 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.227327 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.227337 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231679 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-bin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231735 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-multus\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231755 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-daemon-config\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231770 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vtlb\" (UniqueName: \"kubernetes.io/projected/9c465ec1-5011-46d7-bcf3-df79d8b4543b-kube-api-access-2vtlb\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231788 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-hostroot\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231816 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-socket-dir-parent\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231833 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-k8s-cni-cncf-io\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231840 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-multus\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231855 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cni-binary-copy\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231875 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-kubelet\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231846 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-cni-bin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231895 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-multus-certs\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231932 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-multus-certs\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231944 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-var-lib-kubelet\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231999 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f552c10c-4afe-437f-88f7-09946da0d260-hosts-file\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232014 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-socket-dir-parent\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232052 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f552c10c-4afe-437f-88f7-09946da0d260-hosts-file\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231939 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-hostroot\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.231883 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-k8s-cni-cncf-io\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 
crc kubenswrapper[4840]: I1209 16:57:18.232081 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-system-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232037 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-system-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232137 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-netns\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232158 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-os-release\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232192 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-host-run-netns\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232232 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-os-release\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232274 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twbtv\" (UniqueName: \"kubernetes.io/projected/f552c10c-4afe-437f-88f7-09946da0d260-kube-api-access-twbtv\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232318 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-etc-kubernetes\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232378 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232400 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cnibin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") 
" pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232415 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-conf-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232461 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-conf-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232378 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-etc-kubernetes\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232484 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cni-binary-copy\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232485 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-daemon-config\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232510 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-cnibin\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.232513 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9c465ec1-5011-46d7-bcf3-df79d8b4543b-multus-cni-dir\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.234922 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.251350 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vtlb\" (UniqueName: \"kubernetes.io/projected/9c465ec1-5011-46d7-bcf3-df79d8b4543b-kube-api-access-2vtlb\") pod \"multus-n2cr9\" (UID: \"9c465ec1-5011-46d7-bcf3-df79d8b4543b\") " pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.259791 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.260065 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twbtv\" (UniqueName: \"kubernetes.io/projected/f552c10c-4afe-437f-88f7-09946da0d260-kube-api-access-twbtv\") pod \"node-resolver-tdmlx\" (UID: \"f552c10c-4afe-437f-88f7-09946da0d260\") " pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.274587 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.286657 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.299446 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.312807 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.323672 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.329099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.329137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.329145 4840 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.329160 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.329171 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.333634 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.344477 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.352746 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.365882 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.398064 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-tdmlx" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.410017 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-n2cr9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.432593 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.432626 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.432634 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.432649 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.432657 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.476623 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-fnwb4"] Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.477480 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-kr6l2"] Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.477739 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.477999 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.479495 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.480060 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.480146 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.480603 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.481176 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.481216 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.481471 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.481642 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lpfl9"] Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.482376 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.485471 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.485603 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.485805 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.486481 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.487255 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.487543 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.492482 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.494350 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.512442 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534020 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534777 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534807 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gm99\" (UniqueName: \"kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534839 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-system-cni-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534855 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2qm4\" (UniqueName: 
\"kubernetes.io/projected/fe6d320b-3a64-4724-93af-500d38c77974-kube-api-access-w2qm4\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534869 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534883 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534907 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534929 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fe6d320b-3a64-4724-93af-500d38c77974-proxy-tls\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534944 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534961 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.534992 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535005 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535006 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535026 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535027 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-binary-copy\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535036 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535043 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535053 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535059 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535075 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fe6d320b-3a64-4724-93af-500d38c77974-rootfs\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535065 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535089 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535111 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxdfs\" (UniqueName: \"kubernetes.io/projected/6ad5e84b-9a8c-4644-9327-66c2170ffa58-kube-api-access-fxdfs\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535127 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535142 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535159 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fe6d320b-3a64-4724-93af-500d38c77974-mcd-auth-proxy-config\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535174 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-os-release\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535229 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535264 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535285 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" 
(UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535344 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cnibin\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535373 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535397 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535429 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.535464 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.546345 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.556338 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.568821 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.579658 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.591489 
4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.607590 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.613943 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 
2026-12-09 16:52:17 +0000 UTC, rotation deadline is 2026-10-08 18:50:48.548288863 +0000 UTC Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.614021 4840 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7273h53m29.934270478s for next certificate rotation Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.621262 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636359 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636399 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636424 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/fe6d320b-3a64-4724-93af-500d38c77974-mcd-auth-proxy-config\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636445 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-os-release\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636467 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636487 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636473 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636505 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636529 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cnibin\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636550 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636570 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636597 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-os-release\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636594 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636642 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636660 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636682 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gm99\" (UniqueName: \"kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636706 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2qm4\" (UniqueName: \"kubernetes.io/projected/fe6d320b-3a64-4724-93af-500d38c77974-kube-api-access-w2qm4\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636728 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-system-cni-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636749 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636770 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636809 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fe6d320b-3a64-4724-93af-500d38c77974-proxy-tls\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636832 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636854 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636878 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636908 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-binary-copy\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636929 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636950 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.636998 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637029 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637051 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fe6d320b-3a64-4724-93af-500d38c77974-rootfs\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637071 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637105 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637131 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxdfs\" (UniqueName: \"kubernetes.io/projected/6ad5e84b-9a8c-4644-9327-66c2170ffa58-kube-api-access-fxdfs\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637152 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637257 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.637415 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fe6d320b-3a64-4724-93af-500d38c77974-mcd-auth-proxy-config\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638177 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638267 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638303 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638336 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638506 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638545 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638561 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638580 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-system-cni-dir\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638601 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638638 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cnibin\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638661 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638704 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638712 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fe6d320b-3a64-4724-93af-500d38c77974-rootfs\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638737 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638682 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.638887 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.639170 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.639177 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.639233 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.639313 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.639999 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ad5e84b-9a8c-4644-9327-66c2170ffa58-cni-binary-copy\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: 
\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.640391 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.640413 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.640425 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.640440 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.640451 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.643646 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fe6d320b-3a64-4724-93af-500d38c77974-proxy-tls\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.644074 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.647240 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.663603 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gm99\" (UniqueName: \"kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99\") pod \"ovnkube-node-lpfl9\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.668039 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.673653 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxdfs\" (UniqueName: \"kubernetes.io/projected/6ad5e84b-9a8c-4644-9327-66c2170ffa58-kube-api-access-fxdfs\") pod \"multus-additional-cni-plugins-fnwb4\" (UID: \"6ad5e84b-9a8c-4644-9327-66c2170ffa58\") " pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.682409 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2qm4\" (UniqueName: \"kubernetes.io/projected/fe6d320b-3a64-4724-93af-500d38c77974-kube-api-access-w2qm4\") pod \"machine-config-daemon-kr6l2\" (UID: \"fe6d320b-3a64-4724-93af-500d38c77974\") " pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.705430 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.732419 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.742417 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.742452 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.742461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.742473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.742482 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.746836 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.749597 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerStarted","Data":"973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.749647 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerStarted","Data":"879519cbe5f27d8830c32fd0ecbed14d25667807224a8ee4c4a177e1a9673e5b"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.750509 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-tdmlx" event={"ID":"f552c10c-4afe-437f-88f7-09946da0d260","Type":"ContainerStarted","Data":"bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.750554 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-tdmlx" 
event={"ID":"f552c10c-4afe-437f-88f7-09946da0d260","Type":"ContainerStarted","Data":"4d53c313763904e84bb7b4bc76db2977fa2ad9a1375c297a703e19a71d4de193"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.761828 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.1
1\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.774559 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\
\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.786296 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.795257 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.797496 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: W1209 16:57:18.804086 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ad5e84b_9a8c_4644_9327_66c2170ffa58.slice/crio-3b85bfb3f98edd0e3612e50c191b57ab87d09058c1445d797ee6486f4874325e WatchSource:0}: Error finding container 3b85bfb3f98edd0e3612e50c191b57ab87d09058c1445d797ee6486f4874325e: Status 404 returned error can't find the container with id 3b85bfb3f98edd0e3612e50c191b57ab87d09058c1445d797ee6486f4874325e Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.815294 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.824401 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.829156 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 
16:57:18.832260 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:18 crc kubenswrapper[4840]: W1209 16:57:18.836904 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe6d320b_3a64_4724_93af_500d38c77974.slice/crio-edc12be0e1f4db9fbb0eb1c586bd8376000087baef1fade9a5483c1233f64487 WatchSource:0}: Error finding container edc12be0e1f4db9fbb0eb1c586bd8376000087baef1fade9a5483c1233f64487: Status 404 returned error can't find the container with id edc12be0e1f4db9fbb0eb1c586bd8376000087baef1fade9a5483c1233f64487 Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.842532 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.847075 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.847101 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.847112 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.847128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.847139 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.857414 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.866556 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.878874 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.891148 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.903106 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.924653 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.936282 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.949582 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.949622 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.949633 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.949764 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.949780 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:18Z","lastTransitionTime":"2025-12-09T16:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.951619 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.964210 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.973722 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.985302 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:18 crc kubenswrapper[4840]: I1209 16:57:18.999622 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:18Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.011617 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.027255 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.039385 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.052730 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.052772 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.052783 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.052800 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.052826 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.155316 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.155342 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.155351 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.155363 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.155373 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.258457 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.258507 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.258518 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.258537 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.258549 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.361247 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.361273 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.361282 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.361295 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.361303 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.464179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.464222 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.464231 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.464244 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.464259 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.566865 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.566923 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.566934 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.566955 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.566981 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.607950 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.608076 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:19 crc kubenswrapper[4840]: E1209 16:57:19.608223 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.608272 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:19 crc kubenswrapper[4840]: E1209 16:57:19.608410 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:19 crc kubenswrapper[4840]: E1209 16:57:19.608511 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.669747 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.670180 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.670234 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.670258 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.670275 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.770110 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab" exitCode=0 Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.770153 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.770493 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerStarted","Data":"3b85bfb3f98edd0e3612e50c191b57ab87d09058c1445d797ee6486f4874325e"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.771645 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca" exitCode=0 Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.771685 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.771700 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"e472c683ad5cc7d154b1aa609eb0139e4516abe37b911019e88bd9286833f925"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.773235 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.773290 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.773311 4840 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.773337 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.773358 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.775579 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.775623 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.775640 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"edc12be0e1f4db9fbb0eb1c586bd8376000087baef1fade9a5483c1233f64487"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.791272 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc 
kubenswrapper[4840]: I1209 16:57:19.806482 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.825513 4840 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.845182 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.864696 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.876696 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.876751 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.876764 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.876780 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.876791 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:19Z","lastTransitionTime":"2025-12-09T16:57:19Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.881285 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.905281 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.917946 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.936984 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.956878 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.967959 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-4t29t"] Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.968621 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.970531 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.970956 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 09 16:57:19 crc kubenswrapper[4840]: I1209 16:57:19.971591 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.011053 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.011100 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.011112 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.011128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.011143 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.012211 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:19Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.016828 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.026593 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.038039 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.051751 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.061504 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fbca946d-b14a-4c23-b383-813d02937eea-host\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.061545 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/fbca946d-b14a-4c23-b383-813d02937eea-serviceca\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.061600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjmqx\" (UniqueName: \"kubernetes.io/projected/fbca946d-b14a-4c23-b383-813d02937eea-kube-api-access-sjmqx\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.069111 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.084635 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.106478 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.114800 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.114984 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.115101 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.115224 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.115315 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.118933 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.133366 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.145385 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.161622 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.162014 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/fbca946d-b14a-4c23-b383-813d02937eea-serviceca\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.162055 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjmqx\" (UniqueName: \"kubernetes.io/projected/fbca946d-b14a-4c23-b383-813d02937eea-kube-api-access-sjmqx\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.162084 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fbca946d-b14a-4c23-b383-813d02937eea-host\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.162144 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host\" (UniqueName: \"kubernetes.io/host-path/fbca946d-b14a-4c23-b383-813d02937eea-host\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.163457 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/fbca946d-b14a-4c23-b383-813d02937eea-serviceca\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") " pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.178714 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.192615 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjmqx\" (UniqueName: \"kubernetes.io/projected/fbca946d-b14a-4c23-b383-813d02937eea-kube-api-access-sjmqx\") pod \"node-ca-4t29t\" (UID: \"fbca946d-b14a-4c23-b383-813d02937eea\") 
" pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.199592 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.212107 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.217066 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.217101 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.217109 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.217124 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.217134 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.225407 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.238699 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.254716 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.319321 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.319357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.319367 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.319383 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.319392 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.347748 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4t29t" Dec 09 16:57:20 crc kubenswrapper[4840]: W1209 16:57:20.401758 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbca946d_b14a_4c23_b383_813d02937eea.slice/crio-926781a78488ef9317b0b0703cb4819fff96d8a19dde0007f07029cd52b6666c WatchSource:0}: Error finding container 926781a78488ef9317b0b0703cb4819fff96d8a19dde0007f07029cd52b6666c: Status 404 returned error can't find the container with id 926781a78488ef9317b0b0703cb4819fff96d8a19dde0007f07029cd52b6666c Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.421348 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.421379 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.421391 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.421406 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.421416 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.524390 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.524457 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.524471 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.524490 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.524503 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.626524 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.626556 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.626564 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.626577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.626589 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.729383 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.729432 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.729447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.729467 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.729482 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.781676 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e" exitCode=0 Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.781760 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.783915 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4t29t" event={"ID":"fbca946d-b14a-4c23-b383-813d02937eea","Type":"ContainerStarted","Data":"151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.783948 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4t29t" event={"ID":"fbca946d-b14a-4c23-b383-813d02937eea","Type":"ContainerStarted","Data":"926781a78488ef9317b0b0703cb4819fff96d8a19dde0007f07029cd52b6666c"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789688 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789733 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789747 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789760 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789771 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.789782 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.805821 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.831009 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.837735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.837777 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.837790 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.837813 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.837826 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.852718 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.879728 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.891796 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.908495 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.922611 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.937035 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.941726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.941773 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.941784 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.941804 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.941817 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:20Z","lastTransitionTime":"2025-12-09T16:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.952205 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:20 crc kubenswrapper[4840]: I1209 16:57:20.983819 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:20Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.018335 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.033712 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\"
:\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.043750 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.043781 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.043792 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.043805 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.043814 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.044997 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.058599 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.070005 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.082031 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.090921 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.100821 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.109001 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.122523 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.136609 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.145899 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.145922 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.145931 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.145943 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.145953 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.149754 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.160869 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.174549 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.189909 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.204388 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.219587 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.237908 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z 
is after 2025-08-24T17:21:41Z"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.249424 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.249456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.249467 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.249485 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.249498 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.274493 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.274808 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:57:29.274758922 +0000 UTC m=+35.265869585 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.275141 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.275207 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.275352 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.275366 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.275437 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:29.275420811 +0000 UTC m=+35.266531474 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.275468 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:29.275453632 +0000 UTC m=+35.266564295 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.352311 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.352354 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.352366 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.352380 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.352392 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.376260 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.376352 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376439 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376461 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376472 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376522 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:29.376507908 +0000 UTC m=+35.367618541 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376578 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376614 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376634 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.376718 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:29.376695183 +0000 UTC m=+35.367805856 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.458552 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.458644 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.458668 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.458696 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.458719 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.561915 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.561959 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.561997 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.562015 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.562027 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.607689 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.607760 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.607812 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.607866 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.607892 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:57:21 crc kubenswrapper[4840]: E1209 16:57:21.608115 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.664937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.665021 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.665035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.665055 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.665071 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.770203 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.770260 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.770293 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.770321 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.770343 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.798444 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f" exitCode=0
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.798647 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f"}
Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.819269 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.850564 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.867009 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
5-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.872419 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.872481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.872501 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.872525 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.872543 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.881053 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.899796 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.913265 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.928753 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.957300 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.970246 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.974986 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.975022 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.975030 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.975044 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.975054 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:21Z","lastTransitionTime":"2025-12-09T16:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.985083 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:21 crc kubenswrapper[4840]: I1209 16:57:21.999057 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:21Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.011398 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.024718 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.036873 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.077135 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.077160 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.077168 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.077182 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.077191 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.180394 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.180461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.180480 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.180505 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.180534 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.283371 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.283464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.283483 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.283517 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.283538 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.386944 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.387042 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.387059 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.387083 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.387102 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.489511 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.489599 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.489623 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.489655 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.489679 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.593620 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.593687 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.593706 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.593733 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.593753 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.696640 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.696696 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.696713 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.696738 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.696756 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.800122 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.800457 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.800565 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.800649 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.800750 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.805026 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c" exitCode=0 Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.805078 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.815146 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.828145 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.847293 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.866854 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.881603 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.895987 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.903673 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.903718 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.903730 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.903749 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.903764 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:22Z","lastTransitionTime":"2025-12-09T16:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.912472 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.929905 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.942841 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.955372 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.969616 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.981584 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:22 crc kubenswrapper[4840]: I1209 16:57:22.998439 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:22Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.006411 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.006480 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.006503 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.006533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.006557 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.017219 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.044111 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.108761 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.108797 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.108805 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.108819 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.108828 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.212392 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.212491 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.212515 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.212548 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.212570 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.315084 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.315143 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.315166 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.315193 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.315216 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.424992 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.425300 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.425308 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.425322 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.425330 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.528522 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.528583 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.528601 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.528624 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.528642 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.608201 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:23 crc kubenswrapper[4840]: E1209 16:57:23.608410 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.608510 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:23 crc kubenswrapper[4840]: E1209 16:57:23.608597 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.608665 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:23 crc kubenswrapper[4840]: E1209 16:57:23.608739 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.632823 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.632890 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.632909 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.632933 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.632952 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.736664 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.736705 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.736717 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.736733 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.736747 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.823875 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd" exitCode=0 Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.823915 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.839937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.840008 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.840020 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.840035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.840046 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.840646 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.862312 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.883294 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.903597 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.912872 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.921583 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.931876 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942020 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942056 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942069 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942080 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:23Z","lastTransitionTime":"2025-12-09T16:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.942536 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.952368 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.964344 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.978743 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.989033 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:23 crc kubenswrapper[4840]: I1209 16:57:23.998836 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:23Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.008855 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.044268 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.044333 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.044357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.044385 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.044403 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.146773 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.146804 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.146812 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.146825 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.146834 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.249303 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.249635 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.249832 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.250080 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.250327 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.353473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.353515 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.353524 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.353539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.353549 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.427369 4840 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.463170 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.463201 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.463209 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.463222 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.463232 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.565658 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.565704 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.565720 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.565737 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.565753 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.620367 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.633285 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.647042 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.660009 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.669046 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.669128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.669142 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.670765 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.670796 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.673316 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.696016 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.714720 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.731207 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.745341 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.757405 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.768148 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.772868 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.772896 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.772903 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.772916 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.772925 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.778788 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.792980 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.808457 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.832060 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ad5e84b-9a8c-4644-9327-66c2170ffa58" containerID="21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df" exitCode=0 Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.832414 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" 
event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerDied","Data":"21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.847328 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.866661 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880346 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880379 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880388 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880401 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880410 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.880931 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.894733 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.907593 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.921864 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.944233 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.957456 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.973572 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.983001 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.983202 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.983240 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.983275 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.983302 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:24Z","lastTransitionTime":"2025-12-09T16:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:24 crc kubenswrapper[4840]: I1209 16:57:24.987854 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:24Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.005511 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.020503 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.035508 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.056599 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z 
is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.085857 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.085916 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.085931 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.085955 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.086009 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.188789 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.188872 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.188897 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.188928 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.188953 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.292837 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.292894 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.292912 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.292935 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.292956 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.395245 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.395291 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.395308 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.395329 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.395343 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.498025 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.498085 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.498112 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.498141 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.498166 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.601606 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.601669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.601692 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.601754 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.601779 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.608087 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:25 crc kubenswrapper[4840]: E1209 16:57:25.608244 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.608256 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.608351 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:25 crc kubenswrapper[4840]: E1209 16:57:25.608469 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:25 crc kubenswrapper[4840]: E1209 16:57:25.608562 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.704820 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.704863 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.704875 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.704891 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.704901 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.807651 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.807683 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.807693 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.807717 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.807728 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.841569 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.842521 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.842578 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.848174 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" event={"ID":"6ad5e84b-9a8c-4644-9327-66c2170ffa58","Type":"ContainerStarted","Data":"c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.866847 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.875139 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.879598 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.896699 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bc
b4d38765400371335addde49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.910035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.910088 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.910105 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.910129 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.910147 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:25Z","lastTransitionTime":"2025-12-09T16:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.915214 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.934002 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.952842 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.970284 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:25 crc kubenswrapper[4840]: I1209 16:57:25.987191 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.000100 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:25Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.012789 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.012858 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.012880 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.012905 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.012923 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.017731 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.029173 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.037400 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.050142 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.062626 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.080334 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.096529 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.109644 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.114744 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.114797 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.114822 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.114842 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.114857 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.126514 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.142171 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.153257 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.163990 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.176010 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.195990 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.207624 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-
v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.218041 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.218084 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.218101 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.218122 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.218136 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.221892 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.242803 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.261459 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.278322 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.294923 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.316626 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bc
b4d38765400371335addde49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:26Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.320400 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.320495 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.320521 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.320554 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.320578 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.424057 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.424121 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.424138 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.424165 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.424184 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.526421 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.526465 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.526474 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.526488 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.526498 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.632080 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.632480 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.632708 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.632871 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.633069 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.736849 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.736916 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.736935 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.736960 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.737017 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.840255 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.840363 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.840393 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.840423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.840444 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.853558 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.943464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.944072 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.944152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.944251 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:26 crc kubenswrapper[4840]: I1209 16:57:26.944331 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:26Z","lastTransitionTime":"2025-12-09T16:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.046900 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.046939 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.046949 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.046978 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.046990 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.149702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.149743 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.149751 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.149767 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.149776 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.252726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.253006 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.253094 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.253175 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.253262 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.357749 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.357801 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.357821 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.357846 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.357863 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.461162 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.461226 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.461247 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.461272 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.461289 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.563782 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.563830 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.563839 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.563856 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.563873 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.608032 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.608058 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.608090 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.608581 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.608772 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.609016 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.666456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.666517 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.666531 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.666557 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.666578 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.749990 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.750036 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.750047 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.750066 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.750078 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.770770 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.775982 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.776028 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.776038 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.776056 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.776068 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.794823 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.800320 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.800354 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.800367 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.800380 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.800390 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.820954 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.828667 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.828735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.828753 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.829306 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.829381 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.850321 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.856048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.856091 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.856109 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.856128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.856144 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.861769 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/0.log" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.866712 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49" exitCode=1 Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.866785 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.867917 4840 scope.go:117] "RemoveContainer" containerID="f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49" Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.875577 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1
688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":4977
42284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: E1209 16:57:27.875765 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.881452 4840 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.881498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.881510 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.881527 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.881541 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.882331 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"s
tate\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.907514 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.926251 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.949865 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bc
b4d38765400371335addde49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"1.Node event handler 7\\\\nI1209 16:57:27.482359 6121 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1209 16:57:27.482399 6121 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482458 6121 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.482615 6121 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482743 6121 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 16:57:27.482895 6121 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483242 6121 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483726 6121 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:57:27.483799 6121 factory.go:656] Stopping watch factory\\\\nI1209 16:57:27.483831 6121 ovnkube.go:599] Stopped ovnkube\\\\nI1209 
16:57:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.961549 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.977009 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.984549 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.984609 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.984621 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.984642 4840 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.984654 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:27Z","lastTransitionTime":"2025-12-09T16:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:27 crc kubenswrapper[4840]: I1209 16:57:27.992878 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:27Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.008903 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.022271 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.035167 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.053164 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.064550 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.077200 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.086441 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.086479 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.086487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.086502 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.086511 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.090877 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.188886 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.189024 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.189038 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.189051 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.189061 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.291514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.291559 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.291575 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.291595 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.291611 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.393381 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.393412 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.393421 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.393434 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.393442 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.495608 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.495643 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.495655 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.495674 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.495691 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.597927 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.598011 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.598029 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.598053 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.598069 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.700114 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.700143 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.700151 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.700163 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.700172 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.802814 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.802859 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.802871 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.802887 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.802900 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.871518 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/0.log" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.874549 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.874645 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.892520 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d
0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"1.Node event handler 7\\\\nI1209 16:57:27.482359 6121 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1209 16:57:27.482399 6121 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482458 6121 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.482615 6121 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482743 6121 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 16:57:27.482895 6121 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483242 6121 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483726 6121 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:57:27.483799 6121 factory.go:656] Stopping watch factory\\\\nI1209 16:57:27.483831 6121 ovnkube.go:599] Stopped ovnkube\\\\nI1209 
16:57:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.904808 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.905555 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.905591 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.905602 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.905618 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.905629 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:28Z","lastTransitionTime":"2025-12-09T16:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.918532 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.929638 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.941517 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.951047 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.961326 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.975105 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:28 crc kubenswrapper[4840]: I1209 16:57:28.989380 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.000756 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:28Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.007954 4840 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.008055 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.008102 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.008127 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.008144 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.014998 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.032272 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.043380 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.057138 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.110163 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.110436 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.110529 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.110608 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.110688 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.213195 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.213471 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.213556 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.213620 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.213675 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.316251 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.316512 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.316587 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.316696 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.316772 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.373058 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.373290 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:57:45.373252492 +0000 UTC m=+51.364363165 (durationBeforeRetry 16s).
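Every status patch in the entries above is rejected for the same reason: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, long before the node's current clock time of 2025-12-09. A minimal Go sketch (not part of this log's tooling; it assumes the endpoint is reachable from the node) to confirm the certificate's validity window directly:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Endpoint taken from the webhook errors above. InsecureSkipVerify is
	// deliberate: normal verification would abort on the expired leaf
	// certificate before we could inspect it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%q notBefore=%s notAfter=%s\n",
			cert.Subject.String(), cert.NotBefore, cert.NotAfter)
	}
}

Whether the certificate is genuinely stale or the node's clock jumped forward, the TLS layer rejects it either way, so every webhook-gated status write from this kubelet keeps failing until the certificate is rotated. The TearDown error that continues below is a separate failure.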
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.373459 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.373517 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.373637 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.373658 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.373705 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:45.373690195 +0000 UTC m=+51.364800858 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.373740 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:45.373716285 +0000 UTC m=+51.364826958 (durationBeforeRetry 16s).
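The CSI failure above has a different root cause than the webhook errors: the kubevirt.io.hostpath-provisioner driver has not (re)registered with this kubelet, so there is no CSI client available to tear the volume down. A quick way to see what has actually registered is to list the kubelet's plugin-registration sockets; the sketch below assumes the default registration directory, /var/lib/kubelet/plugins_registry, which may differ if the kubelet runs with a non-standard --root-dir:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Assumed default kubelet plugin-registration directory; every CSI node
	// plugin that has registered with this kubelet leaves a socket here.
	const dir = "/var/lib/kubelet/plugins_registry"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("read %s: %v", dir, err)
	}
	for _, e := range entries {
		fmt.Println(e.Name()) // e.g. kubevirt.io.hostpath-provisioner-reg.sock
	}
}

The "No retries permitted until ... (durationBeforeRetry 16s)" lines mean the kubelet simply requeues the unmount on a backoff; once the driver's node plugin comes back up and re-registers, the queued TearDown should succeed without intervention.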
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.419572 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.419658 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.419687 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.419717 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.419740 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.474695 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.474774 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.474930 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.474987 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.474998 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.475055 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed.
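The "not registered" errors here come from the kubelet's local object cache, which appears not to have re-registered these configmaps and secrets after the restart; volume setup for the affected pods fails and is requeued until it does. The recurring NodeNotReady condition, meanwhile, names a concrete path to check. A sketch that inspects the exact directory from the condition message:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the NetworkReady=false condition above.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("read %s: %v", dir, err)
	}
	if len(entries) == 0 {
		fmt.Println("no CNI config present; node stays NotReady until the network plugin writes one")
		return
	}
	for _, e := range entries {
		fmt.Println(filepath.Join(dir, e.Name()))
	}
}

The multus pods logged earlier are among the components expected to write that configuration; once a conflist lands in the directory, NetworkReady should flip to true and the queued mounts retry.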
No retries permitted until 2025-12-09 16:57:45.475036889 +0000 UTC m=+51.466147522 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.474939 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.475148 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.475171 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.475229 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:57:45.475212344 +0000 UTC m=+51.466322987 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.522435 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.522743 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.522909 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.523093 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.523274 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.607602 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.607659 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.607773 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.607605 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.607891 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.608008 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.626015 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.626093 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.626119 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.626151 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.626174 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.729477 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.729543 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.729566 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.729596 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.729618 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.832464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.832530 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.832553 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.832581 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.832604 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.881383 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/1.log" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.882441 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/0.log" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.886299 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a" exitCode=1 Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.886373 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.886483 4840 scope.go:117] "RemoveContainer" containerID="f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.887527 4840 scope.go:117] "RemoveContainer" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a" Dec 09 16:57:29 crc kubenswrapper[4840]: E1209 16:57:29.887860 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.917074 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.935863 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.936062 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.936122 4840 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.936151 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.936183 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.936208 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:29Z","lastTransitionTime":"2025-12-09T16:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.954341 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.986869 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d
0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"1.Node event handler 7\\\\nI1209 16:57:27.482359 6121 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1209 16:57:27.482399 6121 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482458 6121 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.482615 6121 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482743 6121 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 16:57:27.482895 6121 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483242 6121 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483726 6121 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:57:27.483799 6121 factory.go:656] Stopping watch factory\\\\nI1209 16:57:27.483831 6121 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch 
factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.1
1\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:29 crc kubenswrapper[4840]: I1209 16:57:29.998907 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:29Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.011807 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.028816 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.038951 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.039174 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.039313 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.039482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.039617 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.046712 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.063272 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.079518 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.100904 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.115883 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-
v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.131716 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.142531 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.142571 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.142587 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.142607 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.142622 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.151036 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\
\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.245091 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.245152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.245174 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.245204 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.245227 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.348368 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.348419 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.348456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.348485 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.348507 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.451031 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.451100 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.451114 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.451131 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.451142 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.553362 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.553691 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.553787 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.553886 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.553992 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.657336 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.657423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.657450 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.657482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.657506 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.760852 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.760922 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.760939 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.760993 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.761015 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.863385 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8"]
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864222 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864828 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864870 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864887 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864907 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.864924 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.866871 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.869182 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.888142 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.892449 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/1.log" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.899178 4840 scope.go:117] "RemoveContainer" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a" Dec 09 16:57:30 crc kubenswrapper[4840]: E1209 16:57:30.899357 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.913202 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.930772 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.946195 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.960759 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.967553 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.967634 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.967658 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.967690 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.967712 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:30Z","lastTransitionTime":"2025-12-09T16:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.981753 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f96af5a938a59a5c78f174cf877cecec0a90e9bcb4d38765400371335addde49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:27Z\\\",\\\"message\\\":\\\"1.Node event handler 7\\\\nI1209 16:57:27.482359 6121 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1209 16:57:27.482399 6121 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482458 6121 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.482615 6121 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 16:57:27.482743 6121 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 16:57:27.482895 6121 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483242 6121 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:57:27.483726 6121 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:57:27.483799 6121 factory.go:656] Stopping watch factory\\\\nI1209 16:57:27.483831 6121 ovnkube.go:599] Stopped ovnkube\\\\nI1209 
16:57:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 
16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.993123 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.993201 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbngm\" (UniqueName: \"kubernetes.io/projected/a2ad4cd0-9c34-423b-937d-2856dcbbc640-kube-api-access-bbngm\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.993371 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.993500 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:30 crc kubenswrapper[4840]: I1209 16:57:30.998092 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:30Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.027465 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.046373 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.063251 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.071043 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.071110 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.071129 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.071157 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.071176 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.078500 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.094382 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.094493 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovn-control-plane-metrics-cert\") pod 
\"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.094552 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbngm\" (UniqueName: \"kubernetes.io/projected/a2ad4cd0-9c34-423b-937d-2856dcbbc640-kube-api-access-bbngm\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.094600 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.095544 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.095943 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.096344 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.103942 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2ad4cd0-9c34-423b-937d-2856dcbbc640-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.114489 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.125606 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbngm\" (UniqueName: \"kubernetes.io/projected/a2ad4cd0-9c34-423b-937d-2856dcbbc640-kube-api-access-bbngm\") pod \"ovnkube-control-plane-749d76644c-zxql8\" (UID: \"a2ad4cd0-9c34-423b-937d-2856dcbbc640\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.142133 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.157391 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.167303 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe
9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.174403 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.174443 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.174453 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.174467 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.174477 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.179899 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.191541 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.195141 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.219654 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.230675 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.245799 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.258989 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.273324 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.277657 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.277698 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.277710 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.277726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.277738 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.284943 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.303438 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.317279 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.332509 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.344523 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-
v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.354871 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.367174 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.379370 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.379401 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.379409 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.379422 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.379433 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.481556 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.481589 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.481599 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.481613 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.481623 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.584254 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.584311 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.584328 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.584351 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.584368 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.607571 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.607600 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.607589 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:31 crc kubenswrapper[4840]: E1209 16:57:31.607750 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:31 crc kubenswrapper[4840]: E1209 16:57:31.607851 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:31 crc kubenswrapper[4840]: E1209 16:57:31.608016 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.687143 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.687198 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.687211 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.687228 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.687241 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.790043 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.790094 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.790106 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.790122 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.790133 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.893498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.893560 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.893577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.893605 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.893623 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.904800 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" event={"ID":"a2ad4cd0-9c34-423b-937d-2856dcbbc640","Type":"ContainerStarted","Data":"39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.904868 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" event={"ID":"a2ad4cd0-9c34-423b-937d-2856dcbbc640","Type":"ContainerStarted","Data":"e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.904890 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" event={"ID":"a2ad4cd0-9c34-423b-937d-2856dcbbc640","Type":"ContainerStarted","Data":"3dbbe0e7776816ae0aec66d1c0893793ac93b0917302791069c42827b3469781"} Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.919127 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.932645 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.954467 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.971586 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.990361 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:31Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.995812 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.995876 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.995900 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.995930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:31 crc kubenswrapper[4840]: I1209 16:57:31.995953 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:31Z","lastTransitionTime":"2025-12-09T16:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.010093 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.032763 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.054765 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.069042 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.086421 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.099664 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.099704 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.099718 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.099737 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.099750 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.107259 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.122373 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.140015 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.152342 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.177517 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d
0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.202485 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.202538 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.202554 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.202576 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.202591 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.305206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.305309 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.305333 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.305362 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.305385 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.357327 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.358112 4840 scope.go:117] "RemoveContainer" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a" Dec 09 16:57:32 crc kubenswrapper[4840]: E1209 16:57:32.358257 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.363624 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-hc4xq"] Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.364347 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: E1209 16:57:32.364437 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.376302 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.396299 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.408040 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.408074 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc 
kubenswrapper[4840]: I1209 16:57:32.408081 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.408093 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.408102 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.410216 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.422933 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.436721 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.456919 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.497075 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.510446 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.510495 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.510507 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.510527 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.510541 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.513866 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vk4ds\" (UniqueName: \"kubernetes.io/projected/2099e918-a035-4659-8247-971e3e59c6ef-kube-api-access-vk4ds\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.513925 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.521862 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/s
tatic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.536629 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.555912 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d
0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.566731 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.578768 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 
2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.591132 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.602089 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.612331 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.613691 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.613726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.613738 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.613756 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.613768 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.614497 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vk4ds\" (UniqueName: \"kubernetes.io/projected/2099e918-a035-4659-8247-971e3e59c6ef-kube-api-access-vk4ds\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.614524 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: E1209 16:57:32.614637 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:32 crc kubenswrapper[4840]: E1209 16:57:32.614686 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:57:33.114674045 +0000 UTC m=+39.105784678 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.627057 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:32Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.641820 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vk4ds\" (UniqueName: \"kubernetes.io/projected/2099e918-a035-4659-8247-971e3e59c6ef-kube-api-access-vk4ds\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.716196 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.716238 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.716247 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.716286 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.716296 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.818811 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.818861 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.818879 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.818901 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.818918 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.921836 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.921884 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.921901 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.921922 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:32 crc kubenswrapper[4840]: I1209 16:57:32.921939 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:32Z","lastTransitionTime":"2025-12-09T16:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.024489 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.024576 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.024602 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.024636 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.024661 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.119874 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.120194 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.120343 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:57:34.120313713 +0000 UTC m=+40.111424376 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.127990 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.128043 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.128066 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.128089 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.128106 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.231673 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.231748 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.231767 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.231794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.231811 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.334703 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.334768 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.334786 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.334810 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.334827 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.438099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.438271 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.438296 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.438326 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.438348 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.541482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.541549 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.541569 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.541594 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.541611 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.607846 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.607931 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.608030 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.608205 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.608244 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.608381 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.608533 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:33 crc kubenswrapper[4840]: E1209 16:57:33.608698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.644299 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.644387 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.644405 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.644428 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.644479 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.747220 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.747298 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.747324 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.747357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.747381 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.851827 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.851871 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.851880 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.851898 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.851910 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.954837 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.954911 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.954935 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.955011 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:33 crc kubenswrapper[4840]: I1209 16:57:33.955039 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:33Z","lastTransitionTime":"2025-12-09T16:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.058634 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.058693 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.058710 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.058734 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.058753 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.133858 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:34 crc kubenswrapper[4840]: E1209 16:57:34.134101 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:34 crc kubenswrapper[4840]: E1209 16:57:34.134176 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:57:36.134158135 +0000 UTC m=+42.125268778 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.160827 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.160865 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.160877 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.160892 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.160903 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.264830 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.264870 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.264880 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.264894 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.264904 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.367544 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.367597 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.367617 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.367638 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.367650 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.470792 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.470848 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.470865 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.470889 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.470938 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.573387 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.573444 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.573461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.573484 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.573503 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.631000 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.663262 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d
0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.677099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.677502 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.677775 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.678054 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.678290 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.687053 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.709411 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.725843 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.739002 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.753219 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.768465 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.782385 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.782447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.782473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.782504 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.782529 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.787748 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.805915 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.835513 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.854127 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.871024 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.885925 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.885985 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 
16:57:34.885997 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.886016 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.886028 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.891237 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.914955 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.935599 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:34Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.988866 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.988934 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.988954 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.989010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:34 crc kubenswrapper[4840]: I1209 16:57:34.989030 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:34Z","lastTransitionTime":"2025-12-09T16:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.091856 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.091919 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.091938 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.091961 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.092012 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.195207 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.195270 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.195287 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.195311 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.195328 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.298324 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.298398 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.298421 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.298451 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.298474 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.401851 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.402273 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.402498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.402699 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.402889 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.505662 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.505723 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.505766 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.505790 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.505807 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.607591 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.607671 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.607700 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.607754 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.608280 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.608310 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.608329 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.608349 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.608367 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:35 crc kubenswrapper[4840]: E1209 16:57:35.608749 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:35 crc kubenswrapper[4840]: E1209 16:57:35.609012 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:35 crc kubenswrapper[4840]: E1209 16:57:35.609230 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:35 crc kubenswrapper[4840]: E1209 16:57:35.609461 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.711899 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.712358 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.712520 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.712681 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.712843 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.816374 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.816464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.816506 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.816539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.816581 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.919655 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.919717 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.919735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.919758 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:35 crc kubenswrapper[4840]: I1209 16:57:35.919777 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:35Z","lastTransitionTime":"2025-12-09T16:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.023734 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.023791 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.023809 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.023833 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.023851 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.127465 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.128177 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.128217 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.128250 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.128272 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.156574 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:36 crc kubenswrapper[4840]: E1209 16:57:36.156814 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:36 crc kubenswrapper[4840]: E1209 16:57:36.156935 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:57:40.156900777 +0000 UTC m=+46.148011450 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.231479 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.231538 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.231556 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.231579 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.231597 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.334885 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.334960 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.335012 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.335043 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.335065 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.438028 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.438113 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.438152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.438185 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.438208 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.541344 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.541418 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.541459 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.541497 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.541523 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.645488 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.645600 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.645621 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.645650 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.645673 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.748578 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.748653 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.748671 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.748694 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.748710 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.852307 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.852398 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.852423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.852456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.852481 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.955937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.956055 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.956082 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.956118 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:36 crc kubenswrapper[4840]: I1209 16:57:36.956141 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:36Z","lastTransitionTime":"2025-12-09T16:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.059699 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.059780 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.059797 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.059825 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.059870 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.162367 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.162422 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.162437 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.162456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.162472 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.264937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.265023 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.265036 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.265053 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.265065 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.367841 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.367902 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.367923 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.367998 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.368021 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.472023 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.472097 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.472115 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.472138 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.472159 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.574209 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.574257 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.574269 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.574286 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.574298 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.607880 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.607939 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.607946 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.607894 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:37 crc kubenswrapper[4840]: E1209 16:57:37.608041 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:37 crc kubenswrapper[4840]: E1209 16:57:37.608125 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:37 crc kubenswrapper[4840]: E1209 16:57:37.608210 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:37 crc kubenswrapper[4840]: E1209 16:57:37.608364 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.677757 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.677830 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.677844 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.677862 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.677873 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.780699 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.780768 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.780791 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.780821 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.780846 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.884065 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.884137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.884154 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.884179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:37 crc kubenswrapper[4840]: I1209 16:57:37.884197 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:37Z","lastTransitionTime":"2025-12-09T16:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.006041 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.006108 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.006134 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.006163 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.006188 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.068822 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.068860 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.068868 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.068880 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.068890 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.082654 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:38Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.086983 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.087034 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.087046 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.087061 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.087070 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.105866 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:38Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.110959 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.111071 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.111095 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.111126 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.111148 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.131860 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:38Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.137010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.137052 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.137069 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.137093 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.137109 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.156663 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:38Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.161278 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.161339 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.161363 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.161389 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.161410 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.181060 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:38Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:38 crc kubenswrapper[4840]: E1209 16:57:38.181305 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.184195 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.184260 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.184286 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.184316 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.184340 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.287320 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.287441 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.287461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.287525 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.287544 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.390404 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.390471 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.390489 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.390517 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.390541 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.493750 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.493817 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.493850 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.493890 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.493915 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.597028 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.597132 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.597156 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.597188 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.597209 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.699592 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.699659 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.699682 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.699711 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.699730 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.802887 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.802938 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.803021 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.803053 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.803114 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.906789 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.906852 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.906866 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.906888 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:38 crc kubenswrapper[4840]: I1209 16:57:38.906910 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:38Z","lastTransitionTime":"2025-12-09T16:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.009920 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.009982 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.009996 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.010015 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.010027 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.113185 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.113229 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.113240 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.113256 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.113270 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.216729 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.216804 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.216823 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.216848 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.216866 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.320470 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.320537 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.320555 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.320579 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.320597 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.424032 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.424090 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.424113 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.424139 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.424157 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.527198 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.527260 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.527278 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.527301 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.527318 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.608110 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.608142 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.608146 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.608221 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:39 crc kubenswrapper[4840]: E1209 16:57:39.608294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
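All four sync failures above report the same root cause: no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch of the check being made, in Python; the directory path is taken from the log message itself, while the extension filter is an assumption about common CNI config file names, not the runtime's exact logic:

    # Check whether the CNI config directory named in the log has any config files.
    # The extension set below is an assumption for illustration.
    from pathlib import Path

    CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")  # directory from the log message

    def cni_configs():
        # CNI plugins conventionally drop .conf, .conflist, or .json files here.
        return sorted(p for p in CNI_CONF_DIR.glob("*")
                      if p.suffix in {".conf", ".conflist", ".json"})

    if __name__ == "__main__":
        found = cni_configs()
        print(f"{len(found)} CNI config file(s) in {CNI_CONF_DIR}: {found}")

An empty result is consistent with the NetworkReady=false condition repeated throughout this capture: the network operator has not yet written its config, so every pod needing a sandbox fails to sync.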
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:39 crc kubenswrapper[4840]: E1209 16:57:39.608519 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:39 crc kubenswrapper[4840]: E1209 16:57:39.608730 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:39 crc kubenswrapper[4840]: E1209 16:57:39.608838 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.630447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.630514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.630538 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.630568 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.630591 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.733370 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.733423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.733438 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.733461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.733478 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.836788 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.836869 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.836897 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.836926 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.836947 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.939948 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.940049 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.940075 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.940111 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:39 crc kubenswrapper[4840]: I1209 16:57:39.940135 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:39Z","lastTransitionTime":"2025-12-09T16:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.043381 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.043433 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.043449 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.043474 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.043491 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:40Z","lastTransitionTime":"2025-12-09T16:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.147273 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.147338 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.147357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.147381 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.147400 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:40Z","lastTransitionTime":"2025-12-09T16:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:40 crc kubenswrapper[4840]: I1209 16:57:40.206897 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:40 crc kubenswrapper[4840]: E1209 16:57:40.207181 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:40 crc kubenswrapper[4840]: E1209 16:57:40.207334 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:57:48.20729822 +0000 UTC m=+54.198408903 (durationBeforeRetry 8s). 
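The nestedpendingoperations entry above schedules the next mount attempt 8 seconds out (durationBeforeRetry 8s). That is consistent with a doubling backoff starting at 0.5s, so this would be roughly the fifth consecutive failure (0.5s, 1s, 2s, 4s, 8s). A sketch of that schedule; the initial delay and cap here are illustrative assumptions, not constants quoted from the kubelet source:

    # Doubling retry backoff consistent with "durationBeforeRetry 8s".
    INITIAL_DELAY_S = 0.5   # assumed starting delay
    CAP_S = 122.0           # assumed cap (2m2s is commonly seen in kubelet volume retries)

    def backoff_schedule(failures: int):
        delay, out = INITIAL_DELAY_S, []
        for _ in range(failures):
            out.append(min(delay, CAP_S))
            delay *= 2
        return out

    print(backoff_schedule(6))  # [0.5, 1.0, 2.0, 4.0, 8.0, 16.0]; the 5th failure waits 8s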
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.083244 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.083309 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.083331 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.083359 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.083377 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:41Z","lastTransitionTime":"2025-12-09T16:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
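Each "Node became not ready" entry carries the Ready condition as inline JSON, so the fields worth watching can be pulled out mechanically. A sketch, using a condition string copied from the entries above; the split on "condition=" is an assumption about this klog line layout:

    # Extract the Ready condition JSON from a setters.go entry and print the
    # fields that matter when skimming: status, reason, message.
    import json

    entry = 'setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:41Z","lastTransitionTime":"2025-12-09T16:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}'

    cond = json.loads(entry.split("condition=", 1)[1])
    print(cond["status"], cond["reason"], "-", cond["message"])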
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.608116 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.608120 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.608548 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:41 crc kubenswrapper[4840]: E1209 16:57:41.608717 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:57:41 crc kubenswrapper[4840]: E1209 16:57:41.608929 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:41 crc kubenswrapper[4840]: E1209 16:57:41.609292 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:57:41 crc kubenswrapper[4840]: I1209 16:57:41.609577 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:41 crc kubenswrapper[4840]: E1209 16:57:41.609841 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:42 crc kubenswrapper[4840]: I1209 16:57:42.025522 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:42 crc kubenswrapper[4840]: I1209 16:57:42.025597 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:42 crc kubenswrapper[4840]: I1209 16:57:42.025617 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:42 crc kubenswrapper[4840]: I1209 16:57:42.025647 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:42 crc kubenswrapper[4840]: I1209 16:57:42.025665 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:42Z","lastTransitionTime":"2025-12-09T16:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.057389 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.057455 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.057473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.057500 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.057533 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.575782 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.575857 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.575910 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.575941 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.576153 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.607936 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.607998 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.608023 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.608116 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:43 crc kubenswrapper[4840]: E1209 16:57:43.608221 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:43 crc kubenswrapper[4840]: E1209 16:57:43.608369 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:43 crc kubenswrapper[4840]: E1209 16:57:43.608473 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:43 crc kubenswrapper[4840]: E1209 16:57:43.608584 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.678754 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.678824 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.678846 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.678874 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.678895 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.781627 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.781682 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.781696 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.781709 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.781718 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.884818 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.884873 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.884891 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.884916 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.884932 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.988350 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.988417 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.988451 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.988482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:43 crc kubenswrapper[4840]: I1209 16:57:43.988506 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:43Z","lastTransitionTime":"2025-12-09T16:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.091625 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.091683 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.091700 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.091724 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.091751 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.195204 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.195287 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.195308 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.195339 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.195359 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.298246 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.298301 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.298314 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.298331 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.298342 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.401207 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.401276 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.401292 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.401320 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.401338 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.504660 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.504726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.504745 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.504769 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.504787 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.608149 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.608190 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.608201 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.608218 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.608229 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.626487 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.643076 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.658869 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.674507 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.693048 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.710137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.710491 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.710707 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.710917 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.711138 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.712094 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.730213 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.744726 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.760605 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.780683 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.797773 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.812560 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.814318 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.814363 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.814377 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.814395 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.814410 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.830908 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.843519 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.855803 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.869040 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:44Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.916539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.916942 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.917039 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.917119 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:44 crc kubenswrapper[4840]: I1209 16:57:44.917256 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:44Z","lastTransitionTime":"2025-12-09T16:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.020879 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.020951 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.021014 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.021048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.021071 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.123960 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.124077 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.124102 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.124128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.124149 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.468052 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.468202 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.468311 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.468446 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.468512 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:58:17.468492557 +0000 UTC m=+83.459603200 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.468793 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:58:17.468778565 +0000 UTC m=+83.459889218 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.468843 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.468882 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:58:17.468870708 +0000 UTC m=+83.459981361 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.519494 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.519538 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.519549 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.519563 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.519573 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.569592 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.569744 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569768 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569797 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569815 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569876 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:58:17.569854621 +0000 UTC m=+83.560965264 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569907 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569932 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.569953 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.570105 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:58:17.570080088 +0000 UTC m=+83.561190761 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.608166 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.608234 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.608252 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.608414 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.608417 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.608514 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.608651 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:45 crc kubenswrapper[4840]: E1209 16:57:45.608868 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.610103 4840 scope.go:117] "RemoveContainer" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.622207 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.622672 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.622697 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.622727 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.622750 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.725794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.725889 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.725906 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.725930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.725949 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.836140 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.836192 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.836204 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.836224 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.836237 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.941187 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.941246 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.941261 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.941283 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.941298 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:45Z","lastTransitionTime":"2025-12-09T16:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.959302 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/1.log"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.964513 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32"}
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.965014 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.979516 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:45Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:45 crc kubenswrapper[4840]: I1209 16:57:45.998031 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:45Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.013248 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.037105 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.043629 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.043670 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.043702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.043724 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.043738 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.054305 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.087493 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 
16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.102854 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.118951 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.134039 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.146907 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.146953 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.146978 4840 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.146999 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.147009 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.147286 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.160296 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.173282 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.190585 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.204864 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.216251 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.226728 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.249035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.249089 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
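Every "Failed to update status for pod" record in this stretch of the log fails for the same reason: the kubelet's status PATCH is intercepted by the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, and that endpoint's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-12-09. As a hedged illustration (annotation, not captured log output), the expiry could be confirmed from the CRC host with openssl:

    # Sketch, assuming shell access on the host: dump the validity window of
    # the certificate served on the webhook port the kubelet cannot verify.
    openssl s_client -connect 127.0.0.1:9743 </dev/null 2>/dev/null \
      | openssl x509 -noout -subject -dates

A notAfter of Aug 24 17:21:41 2025 GMT would line up with the x509 error text repeated in the records above and below.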
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.249104 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.249150 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.249165 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.351684 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.351726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.351737 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.351751 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.351760 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.454402 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.454466 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.454488 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.454510 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.454526 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.556582 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.556629 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.556639 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.556654 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.556664 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.659482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.659529 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.659544 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.659574 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.659590 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.762532 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.762588 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.762605 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.762667 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.762685 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.866736 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.866818 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.866838 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.866866 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.866885 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.969473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.969534 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.969550 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.969575 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.969592 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:46Z","lastTransitionTime":"2025-12-09T16:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
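The NodeNotReady records above repeat roughly every 100 ms because the kubelet keeps re-evaluating readiness and keeps finding no CNI config in /etc/kubernetes/cni/net.d/. On this cluster that config should come from OVN-Kubernetes, and the records that follow show why it never appears: the ovnkube-controller container of pod ovnkube-node-lpfl9 keeps exiting and entering CrashLoopBackOff. A minimal triage sketch (annotation, not captured log output; assumes host shell access and a working oc session):

    # Sketch: confirm the CNI config directory is empty, then read the tail of
    # the previous ovnkube-controller attempt to see why it exited.
    ls -l /etc/kubernetes/cni/net.d/
    oc -n openshift-ovn-kubernetes logs ovnkube-node-lpfl9 \
      -c ovnkube-controller --previous | tail -n 20

The pod and container names are taken from the SyncLoop and CrashLoopBackOff records just below.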
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.970721 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/2.log"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.971924 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/1.log"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.976475 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32" exitCode=1
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.976521 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32"}
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.976607 4840 scope.go:117] "RemoveContainer" containerID="7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.979656 4840 scope.go:117] "RemoveContainer" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32"
Dec 09 16:57:46 crc kubenswrapper[4840]: E1209 16:57:46.981314 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3"
Dec 09 16:57:46 crc kubenswrapper[4840]: I1209 16:57:46.997649 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:46Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.016424 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.035583 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.065315 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7aaa15a68fd4e135fe410d3b5ecb20da8a5ea44d0ef0f7dd0b39b8eafb27ee7a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:29Z\\\",\\\"message\\\":\\\"60206a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:28.691460 6246 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 16:57:28.691530 6246 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 16:57:28.691566 6246 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1209 16:57:28.691586 6246 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 1.02599ms\\\\nI1209 16:57:28.691612 6246 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 16:57:28.691680 6246 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 16:57:28.691752 6246 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1209 16:57:28.691684 6246 handler.go:208] Removed *v1.Node event handler 7\\\\nI1209 16:57:28.691787 6246 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 16:57:28.691813 6246 factory.go:656] Stopping watch factory\\\\nI1209 16:57:28.691838 6246 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:57:28.691849 6246 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1209 16:57:28.691895 6246 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:57:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.072393 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.072456 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.072473 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.072496 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.072513 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.080537 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.092781 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.108205 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.130365 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.149258 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.167231 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.175074 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.175137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.175157 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.175182 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.175199 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.182948 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.197874 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.213600 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.224312 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.235498 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.253674 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:47Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.277582 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.277618 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.277631 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.277648 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.277661 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.380735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.380818 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.380840 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.380867 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.380886 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.484530 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.484586 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.484603 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.484626 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.484647 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.587401 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.587481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.587499 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.587527 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.587550 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.608403 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.608475 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.608622 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:47 crc kubenswrapper[4840]: E1209 16:57:47.608618 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.608670 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:47 crc kubenswrapper[4840]: E1209 16:57:47.608817 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:57:47 crc kubenswrapper[4840]: E1209 16:57:47.609067 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:47 crc kubenswrapper[4840]: E1209 16:57:47.609154 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.690777 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.690835 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.690853 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.690876 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.690893 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.793324 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.793389 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.793581 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.793628 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.793666 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.896690 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.896750 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.896770 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.896795 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.896813 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.982200 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/2.log"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.986580 4840 scope.go:117] "RemoveContainer" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32"
Dec 09 16:57:47 crc kubenswrapper[4840]: E1209 16:57:47.986756 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.999178 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.999246 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.999270 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.999300 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:47 crc kubenswrapper[4840]: I1209 16:57:47.999327 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:47Z","lastTransitionTime":"2025-12-09T16:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.011563 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.030485 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.048679 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.064066 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.077693 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.093467 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.102794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.102839 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.102855 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.102878 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.102893 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.110728 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.130723 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.201305 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.206559 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.206599 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.206615 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.206699 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.206777 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.217631 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.234880 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.253364 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.267897 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.290279 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.302645 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.302823 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.302903 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}"
failed. No retries permitted until 2025-12-09 16:58:04.302881295 +0000 UTC m=+70.293991938 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.306866 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.309408 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.309436 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.309448 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.309464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.309477 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.328479 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.412874 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.412949 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.413026 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.413062 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.413088 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.485929 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.486034 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.486052 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.486078 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.486096 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.506844 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.511765 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.511842 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.511861 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.511884 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.511901 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.531263 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.536433 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.536559 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.536580 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.536609 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.536628 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.558226 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.563331 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.563440 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.563469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.563498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.563520 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.579862 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.585059 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.585127 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.585152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.585181 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.585204 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.605713 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:48Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:48 crc kubenswrapper[4840]: E1209 16:57:48.606013 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.608512 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
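
Every "Error updating node status, will retry" record above fails the same way: before the node status patch can be persisted, the API server must call the validating webhook node.network-node-identity.openshift.io at https://127.0.0.1:9743, and that webhook is serving a TLS certificate that expired on 2025-08-24T17:21:41Z while the node clock reads 2025-12-09T16:57:48Z. Once the retry budget is exhausted the kubelet logs "update node status exceeds retry count" and waits for the next status-sync interval. A minimal Go sketch that inspects the certificate the webhook actually serves (illustrative diagnostic only; the address is taken from the log, and InsecureSkipVerify is an assumption so the handshake completes despite the expired certificate):

    // certcheck.go - print the validity window of the certificate served on
    // the webhook port reported in the log. Illustrative only: the address
    // comes from the log; everything else is an assumption.
    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        // Skip verification so the handshake succeeds even though the
        // certificate is expired; we only want to read what is presented.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()

        cert := conn.ConnectionState().PeerCertificates[0]
        now := time.Now().UTC()
        fmt.Println("subject:  ", cert.Subject)
        fmt.Println("notBefore:", cert.NotBefore.UTC().Format(time.RFC3339))
        fmt.Println("notAfter: ", cert.NotAfter.UTC().Format(time.RFC3339))
        fmt.Println("expired:  ", now.After(cert.NotAfter))
    }

Skipping verification is reasonable here because the connection is only used to read the presented certificate; against this log, such a check would be expected to report notAfter 2025-08-24T17:21:41Z, matching the x509 error in the patch failures above.
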
event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.608587 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.608614 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.608642 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.608661 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.711509 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.711558 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.711572 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.711594 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.711645 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.815282 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.815328 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.815340 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.815355 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.815365 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.918539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.918910 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.918919 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.918932 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:48 crc kubenswrapper[4840]: I1209 16:57:48.918940 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:48Z","lastTransitionTime":"2025-12-09T16:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.022390 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.022461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.022487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.022519 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.022542 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.125778 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.125842 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.125859 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.125881 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.125898 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.230155 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.230214 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.230232 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.230265 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.230301 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.333878 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.333949 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.334006 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.334040 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.334062 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.437876 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.437947 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.437993 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.438022 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.438040 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.541295 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.541364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.541386 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.541417 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.541442 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.607828 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:49 crc kubenswrapper[4840]: E1209 16:57:49.608059 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.608102 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.608125 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.608115 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:49 crc kubenswrapper[4840]: E1209 16:57:49.608211 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:49 crc kubenswrapper[4840]: E1209 16:57:49.608329 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
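
The "no CNI configuration file in /etc/kubernetes/cni/net.d/" errors above are why the Ready condition stays False and why every pod sandbox creation is skipped: the kubelet reports the runtime network NotReady until a network configuration appears in that directory. A minimal Go sketch of the check an operator might run by hand (illustrative only; the path comes from the log, and the accepted extensions are an assumption about common CNI config names):

    // cnicheck.go - report whether the CNI config directory named in the
    // kubelet log contains any configuration files yet. Illustrative only.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d" // path taken from the log
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println("cannot read", dir, "-", err)
            return
        }
        found := 0
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // common CNI config suffixes (assumption)
                fmt.Println("found:", filepath.Join(dir, e.Name()))
                found++
            }
        }
        if found == 0 {
            fmt.Println("no CNI configuration files in", dir, "- network provider not started?")
        }
    }

At this point in the boot the directory is evidently still empty; together with the expired network-node-identity webhook certificate seen earlier, this suggests the cluster's certificates are stale overall, which would also delay the network provider, so the node is likely to stay NotReady until the certificates are rotated.
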
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:49 crc kubenswrapper[4840]: E1209 16:57:49.608525 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.644611 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.644686 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.644703 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.644727 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.644754 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.748099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.748161 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.748179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.748201 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.748219 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.851020 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.851079 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.851099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.851122 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.851141 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.899465 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.911628 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.920597 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:49Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.942128 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:49Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.954227 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.954304 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.954323 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.954345 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.954379 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:49Z","lastTransitionTime":"2025-12-09T16:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.965311 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:49Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:49 crc kubenswrapper[4840]: I1209 16:57:49.988813 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:49Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.011153 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.044375 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.057739 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.057797 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.057818 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.057846 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.057872 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.069749 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.090297 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.109665 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.123876 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.141611 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160539 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160580 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160594 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160611 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160623 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.160579 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.175167 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.198706 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.216611 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootf
s\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.233694 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:50Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.267303 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.267364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.267381 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.267405 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.268149 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.371752 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.371823 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.371840 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.371865 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.371882 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.475961 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.476061 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.476080 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.476104 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.476122 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.580037 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.580115 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.580137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.580167 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.580191 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.683934 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.684049 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.684073 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.684104 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.684127 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.787449 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.787512 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.787530 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.787553 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.787570 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.890726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.890791 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.890805 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.890822 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.890835 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.993919 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.994032 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.994065 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.994094 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:50 crc kubenswrapper[4840]: I1209 16:57:50.994116 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:50Z","lastTransitionTime":"2025-12-09T16:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.097476 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.097549 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.097582 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.097611 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.097632 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.199852 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.199926 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.199944 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.200010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.200038 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.302998 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.303073 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.303099 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.303129 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.303152 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.405660 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.405795 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.405823 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.405853 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.405876 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.509386 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.509464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.509489 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.509514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.509534 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.607913 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.608030 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.608084 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.607945 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:51 crc kubenswrapper[4840]: E1209 16:57:51.608264 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:51 crc kubenswrapper[4840]: E1209 16:57:51.608423 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:51 crc kubenswrapper[4840]: E1209 16:57:51.608536 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:51 crc kubenswrapper[4840]: E1209 16:57:51.608760 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.612144 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.612189 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.612209 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.612325 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.612424 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.715437 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.715517 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.715546 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.715581 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.715613 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.818753 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.818947 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.819569 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.819607 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.819623 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.922475 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.922533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.922548 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.922573 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:51 crc kubenswrapper[4840]: I1209 16:57:51.922598 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:51Z","lastTransitionTime":"2025-12-09T16:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.025244 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.025289 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.025301 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.025317 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.025329 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.128487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.128555 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.128577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.128650 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.128722 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.232481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.232546 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.232564 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.232588 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.232607 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.335458 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.335522 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.335546 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.335577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.335599 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.438567 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.438615 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.438627 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.438643 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.438654 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.541723 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.541794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.541813 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.541839 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.541857 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.644258 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.644355 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.644376 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.644402 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.644420 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.747092 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.747167 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.747203 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.747221 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.747235 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.849906 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.849937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.849947 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.849984 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.849995 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.952852 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.952911 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.952923 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.952940 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:52 crc kubenswrapper[4840]: I1209 16:57:52.952951 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:52Z","lastTransitionTime":"2025-12-09T16:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.056258 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.056309 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.056317 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.056330 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.056340 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.158909 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.159010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.159044 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.159074 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.159094 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.261554 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.261597 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.261608 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.261624 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.261635 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.364221 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.364262 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.364271 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.364287 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.364297 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.466458 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.466501 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.466513 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.466529 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.466540 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.570378 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.570451 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.570472 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.570494 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.570512 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.607922 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.608025 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.608055 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.607954 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:53 crc kubenswrapper[4840]: E1209 16:57:53.608196 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:53 crc kubenswrapper[4840]: E1209 16:57:53.608344 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:53 crc kubenswrapper[4840]: E1209 16:57:53.608489 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:53 crc kubenswrapper[4840]: E1209 16:57:53.608604 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.673558 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.673621 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.673639 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.673662 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.673679 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.776625 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.776704 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.776722 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.776745 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.776763 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.879651 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.879712 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.879728 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.879751 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.879770 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.982179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.982232 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.982255 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.982287 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:53 crc kubenswrapper[4840]: I1209 16:57:53.982309 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:53Z","lastTransitionTime":"2025-12-09T16:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.084536 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.084577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.084587 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.084603 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.084614 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.186955 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.187017 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.187028 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.187048 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.187059 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.290040 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.290105 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.290114 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.290134 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.290153 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.392784 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.392831 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.392841 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.392856 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.392868 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.496063 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.496157 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.496180 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.496206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.496224 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.599477 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.599534 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.599551 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.599574 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.599591 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.645286 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.666916 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.689502 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.702311 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.702520 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.702678 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.702866 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.703040 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.712308 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.732275 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.749864 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.767647 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.787486 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 
16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.804722 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.806702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.806741 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.806757 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.806780 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.806799 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.842400 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.871884 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.887550 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.902120 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.909238 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.909270 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.909282 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.909298 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.909310 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:54Z","lastTransitionTime":"2025-12-09T16:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.922788 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.941899 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.957079 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:54 crc kubenswrapper[4840]: I1209 16:57:54.968867 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:54Z is after 2025-08-24T17:21:41Z" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.010817 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.010857 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.010866 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.010882 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.010894 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.114148 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.114208 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.114226 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.114250 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.114268 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.217957 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.218052 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.218076 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.218104 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.218126 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.321075 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.321136 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.321154 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.321177 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.321194 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.424124 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.424171 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.424187 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.424206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.424223 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.527798 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.527847 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.527864 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.527930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.527950 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.607878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.608041 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:57:55 crc kubenswrapper[4840]: E1209 16:57:55.608083 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.608340 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:57:55 crc kubenswrapper[4840]: E1209 16:57:55.608292 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.608346 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:57:55 crc kubenswrapper[4840]: E1209 16:57:55.608598 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:55 crc kubenswrapper[4840]: E1209 16:57:55.608447 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.630127 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.630179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.630196 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.630219 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.630236 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.733682 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.733729 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.733738 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.733752 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.733762 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.837145 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.837196 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.837206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.837222 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.837233 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.940102 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.940169 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.940187 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.940212 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:55 crc kubenswrapper[4840]: I1209 16:57:55.940230 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:55Z","lastTransitionTime":"2025-12-09T16:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.043676 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.043741 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.043757 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.043782 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.043799 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.146735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.146794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.146810 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.146834 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.146858 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.249840 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.249899 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.249917 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.249940 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.249958 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.353468 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.353605 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.353676 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.353703 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.353761 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.456613 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.456671 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.456687 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.456709 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.456725 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.559400 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.559467 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.559488 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.559513 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.559530 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.662589 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.662681 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.662700 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.662723 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.662764 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.765033 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.765105 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.765122 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.765147 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.765163 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.868009 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.868079 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.868095 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.868126 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.868145 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.970698 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.970776 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.970799 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.970827 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:56 crc kubenswrapper[4840]: I1209 16:57:56.970848 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:56Z","lastTransitionTime":"2025-12-09T16:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.072879 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.072933 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.072949 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.072998 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.073015 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.175853 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.175901 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.175926 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.175950 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.176006 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.279073 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.279117 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.279126 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.279142 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.279152 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.381289 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.381343 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.381361 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.381379 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.381391 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.483272 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.483334 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.483347 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.483362 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.483374 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.584954 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.585041 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.585058 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.585085 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.585103 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.607878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.607946 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.608008 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:57 crc kubenswrapper[4840]: E1209 16:57:57.608147 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.608211 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:57 crc kubenswrapper[4840]: E1209 16:57:57.608337 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:57 crc kubenswrapper[4840]: E1209 16:57:57.608485 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:57:57 crc kubenswrapper[4840]: E1209 16:57:57.608596 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
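The four "Error syncing pod" entries above, and the Ready=False condition that repeats throughout this log, all trace back to one fact: the kubelet found no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal Go sketch of the check an operator might run on the node follows; the directory path is taken from the log, while the program itself and the .conf/.conflist/.json filter are illustrative assumptions, not kubelet code.

    // cnicheck.go — illustrative only; reproduces the condition the kubelet
    // reported: is there any CNI config file in the directory it found empty?
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d" // directory named in the NetworkPluginNotReady message
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Printf("cannot read %s: %v\n", dir, err)
            os.Exit(1)
        }
        found := 0
        for _, e := range entries {
            // CNI loaders conventionally pick up .conf, .conflist and .json files
            // (an assumption here, not something stated in this log).
            ext := strings.ToLower(filepath.Ext(e.Name()))
            if ext == ".conf" || ext == ".conflist" || ext == ".json" {
                fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
                found++
            }
        }
        if found == 0 {
            fmt.Println("no CNI configuration files; the node stays NotReady until the network provider writes one")
        }
    }

If the directory really is empty, the fix lives with the network provider (here, the OVN/multus pods that have not started yet), not with the kubelet.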
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.690264 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.690392 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.690413 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.690432 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.690445 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.792884 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.792917 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.792925 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.792937 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.792945 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.896000 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.896070 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.896094 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.896121 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.896142 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.998618 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.998687 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.998707 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.998735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:57 crc kubenswrapper[4840]: I1209 16:57:57.998755 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:57Z","lastTransitionTime":"2025-12-09T16:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.101085 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.101153 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.101163 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.101183 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.101197 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.204349 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.204386 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.204395 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.204412 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.204421 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.307986 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.308033 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.308042 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.308057 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.308070 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.410633 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.410670 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.410681 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.410697 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.410709 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.512617 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.512659 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.512669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.512684 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.512694 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.614825 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.614862 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.614873 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.614886 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.614898 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.634001 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.634060 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.634084 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.634110 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.634130 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
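Every heartbeat cycle above ends with a setters.go:603 entry whose condition={...} payload is plain JSON. When triaging a log like this, one might extract that payload and keep only transitions rather than every heartbeat. The Go sketch below does this for a single line; the payload shape is copied from the log, while the strings.Cut heuristic and the shortened sample literal are assumptions for illustration, not kubelet code.

    // condparse.go — pulls the Ready condition out of one
    // "Node became not ready" log line for filtering/deduplication.
    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        // Sample line, abbreviated from the log above for readability.
        line := `setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
        _, payload, ok := strings.Cut(line, "condition=")
        if !ok {
            fmt.Println("no condition payload in line")
            return
        }
        var c nodeCondition
        if err := json.Unmarshal([]byte(payload), &c); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Printf("%s=%s reason=%s since=%s\n", c.Type, c.Status, c.Reason, c.LastTransitionTime)
    }

Run against the entries above, this prints Ready=False reason=KubeletNotReady for every cycle, which is what makes the repetition safe to collapse during analysis.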
Dec 09 16:57:58 crc kubenswrapper[4840]: E1209 16:57:58.653840 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:58Z is after 2025-08-24T17:21:41Z"
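The patch failure above is a second, independent problem: the node-identity webhook at 127.0.0.1:9743 is serving a certificate that expired on 2025-08-24T17:21:41Z, so every node status update fails until it is rotated. A short Go sketch of how one might confirm this from the node follows; the address and expiry come from the log, while disabling verification to inspect an expired certificate is a debugging assumption, not kubelet behaviour.

    // certprobe.go — illustrative: connect to the webhook endpoint named in the
    // log and print the serving certificate's validity window.
    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // debugging only: lets us read an expired cert
        })
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        certs := conn.ConnectionState().PeerCertificates
        if len(certs) == 0 {
            fmt.Println("no peer certificate presented")
            return
        }
        cert := certs[0]
        fmt.Println("subject:  ", cert.Subject)
        fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
        fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
        if time.Now().After(cert.NotAfter) {
            fmt.Println("certificate has expired, matching the x509 error in the log")
        }
    }

The two retries that follow fail identically, which is expected: the kubelet backs off and retries the same patch while the webhook keeps presenting the same expired certificate.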
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.658424 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.658492 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.658518 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.658548 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.658568 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: E1209 16:57:58.673085 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:58Z is after 2025-08-24T17:21:41Z"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.677142 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.677172 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.677184 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.677200 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.677213 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: E1209 16:57:58.692954 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:57:58Z is after 
2025-08-24T17:21:41Z"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.696606 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.696702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.696728 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.696754 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.696773 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.715293 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.715330 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.715341 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.715357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.715368 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: E1209 16:57:58.731296 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.733633 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.733669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.733683 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.733700 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.733715 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.836221 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.836518 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.836531 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.836546 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.836557 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.940315 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.940409 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.940430 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.940487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:58 crc kubenswrapper[4840]: I1209 16:57:58.940508 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:58Z","lastTransitionTime":"2025-12-09T16:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.043409 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.043440 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.043450 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.043466 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.043477 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:57:59Z","lastTransitionTime":"2025-12-09T16:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.608468 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.608512 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.608544 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:57:59 crc kubenswrapper[4840]: I1209 16:57:59.608555 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:57:59 crc kubenswrapper[4840]: E1209 16:57:59.608624 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:57:59 crc kubenswrapper[4840]: E1209 16:57:59.608715 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:57:59 crc kubenswrapper[4840]: E1209 16:57:59.608866 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:57:59 crc kubenswrapper[4840]: E1209 16:57:59.609047 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.072701 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.072747 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.072758 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.072775 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.072785 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:00Z","lastTransitionTime":"2025-12-09T16:58:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.586500 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.586562 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.586590 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.586631 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.586657 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:00Z","lastTransitionTime":"2025-12-09T16:58:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 09 16:58:00 crc kubenswrapper[4840]: I1209 16:58:00.608507 4840 scope.go:117] "RemoveContainer" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32"
Dec 09 16:58:00 crc kubenswrapper[4840]: E1209 16:58:00.608698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3"
Dec 09 16:58:01 crc kubenswrapper[4840]: I1209 16:58:01.607531 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:01 crc kubenswrapper[4840]: I1209 16:58:01.607630 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:58:01 crc kubenswrapper[4840]: I1209 16:58:01.607728 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:58:01 crc kubenswrapper[4840]: E1209 16:58:01.607720 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:58:01 crc kubenswrapper[4840]: I1209 16:58:01.607754 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:58:01 crc kubenswrapper[4840]: E1209 16:58:01.607872 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:58:01 crc kubenswrapper[4840]: E1209 16:58:01.607980 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:58:01 crc kubenswrapper[4840]: E1209 16:58:01.608105 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:58:03 crc kubenswrapper[4840]: I1209 16:58:03.608221 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:58:03 crc kubenswrapper[4840]: E1209 16:58:03.608337 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef"
Dec 09 16:58:03 crc kubenswrapper[4840]: I1209 16:58:03.608684 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:58:03 crc kubenswrapper[4840]: E1209 16:58:03.608739 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:58:03 crc kubenswrapper[4840]: I1209 16:58:03.608774 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:58:03 crc kubenswrapper[4840]: E1209 16:58:03.608809 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:58:03 crc kubenswrapper[4840]: I1209 16:58:03.608841 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:03 crc kubenswrapper[4840]: E1209 16:58:03.608877 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.381091 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:58:04 crc kubenswrapper[4840]: E1209 16:58:04.381265 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 09 16:58:04 crc kubenswrapper[4840]: E1209 16:58:04.381345 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:58:36.381324103 +0000 UTC m=+102.372434816 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered
Has your network provider started?"} Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.592296 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.592329 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.592337 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.592351 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.592360 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:04Z","lastTransitionTime":"2025-12-09T16:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.619094 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-sched
uler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.630834 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.648125 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.661069 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.672055 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.681904 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.693742 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.694529 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.694565 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.694577 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.694594 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.694606 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:04Z","lastTransitionTime":"2025-12-09T16:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.709804 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.730161 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.742413 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.768141 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.778039 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.792638 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.796451 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.796487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.796498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.796514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.796527 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:04Z","lastTransitionTime":"2025-12-09T16:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.803165 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.815032 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.826236 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.835524 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:04Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.898709 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.898742 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.898752 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.898768 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:04 crc kubenswrapper[4840]: I1209 16:58:04.898781 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:04Z","lastTransitionTime":"2025-12-09T16:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.000809 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.000887 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.000898 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.000914 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.000927 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.041686 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/0.log" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.041739 4840 generic.go:334] "Generic (PLEG): container finished" podID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" containerID="973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9" exitCode=1 Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.041769 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerDied","Data":"973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.042144 4840 scope.go:117] "RemoveContainer" containerID="973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.056554 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.072204 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.082123 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.091550 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.101143 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.102620 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.102644 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.102653 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.102666 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.102677 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.111628 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.122398 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.135270 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.146005 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootf
s\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.155720 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.166351 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.177212 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.190206 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:04Z\\\",\\\"message\\\":\\\"2025-12-09T16:57:19+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd\\\\n2025-12-09T16:57:19+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd to /host/opt/cni/bin/\\\\n2025-12-09T16:57:19Z [verbose] multus-daemon started\\\\n2025-12-09T16:57:19Z [verbose] Readiness Indicator file check\\\\n2025-12-09T16:58:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.201929 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.204306 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.204336 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.204343 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.204357 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.204367 4840 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.213776 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.233025 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.244874 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:05Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.306533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.306569 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.306578 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.306592 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.306602 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.408775 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.408806 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.408816 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.408829 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.408838 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.511423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.511468 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.511478 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.511491 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.511500 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.608185 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.608223 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.608202 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.608188 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:05 crc kubenswrapper[4840]: E1209 16:58:05.608299 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:05 crc kubenswrapper[4840]: E1209 16:58:05.608404 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:05 crc kubenswrapper[4840]: E1209 16:58:05.608472 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:05 crc kubenswrapper[4840]: E1209 16:58:05.608520 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.613686 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.613713 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.613723 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.613736 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.613745 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.715669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.715706 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.715716 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.715728 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.715737 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.817683 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.817714 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.817722 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.817735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.817744 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.920475 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.920514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.920525 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.920540 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:05 crc kubenswrapper[4840]: I1209 16:58:05.920550 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:05Z","lastTransitionTime":"2025-12-09T16:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.022897 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.022993 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.023018 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.023047 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.023069 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.045920 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/0.log" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.046004 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerStarted","Data":"cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.057728 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.071101 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.087670 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.101543 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootf
s\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.111131 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.122930 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.125533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.125567 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.125575 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.125592 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.125603 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.136481 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:04Z\\\",\\\"message\\\":\\\"2025-12-09T16:57:19+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd\\\\n2025-12-09T16:57:19+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd to /host/opt/cni/bin/\\\\n2025-12-09T16:57:19Z [verbose] multus-daemon started\\\\n2025-12-09T16:57:19Z [verbose] Readiness Indicator file check\\\\n2025-12-09T16:58:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.146936 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.159690 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.171935 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.193309 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c8
14d924eedf35aafa17be3f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.201889 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.210121 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.220400 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.227652 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.227692 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 
16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.227728 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.227744 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.227755 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.233787 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPat
h\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.243562 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.251298 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:06Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.329898 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.329930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.329940 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.329956 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.329979 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.432179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.432229 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.432251 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.432278 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.432300 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.534426 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.534469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.534482 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.534499 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.534512 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.636469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.636496 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.636504 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.636515 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.636524 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.738737 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.738818 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.738838 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.738861 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.738878 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.842173 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.842223 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.842237 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.842255 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.842268 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.945134 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.945196 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.945219 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.945249 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:06 crc kubenswrapper[4840]: I1209 16:58:06.945270 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:06Z","lastTransitionTime":"2025-12-09T16:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.047839 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.047882 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.047893 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.047908 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.047918 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.155272 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.155329 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.155341 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.155359 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.155373 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.257998 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.258058 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.258070 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.258087 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.258098 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.360306 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.360355 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.360367 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.360384 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.360393 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.463659 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.463741 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.463768 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.463799 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.463825 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.566526 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.566599 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.566617 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.566643 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.566661 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.608233 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.608290 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:07 crc kubenswrapper[4840]: E1209 16:58:07.608363 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.608424 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:07 crc kubenswrapper[4840]: E1209 16:58:07.608570 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.608474 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:07 crc kubenswrapper[4840]: E1209 16:58:07.608788 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:07 crc kubenswrapper[4840]: E1209 16:58:07.608771 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.669182 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.669225 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.669238 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.669256 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.669268 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.771559 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.771607 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.771615 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.771629 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.771638 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.873638 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.873681 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.873689 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.873702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.873712 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.975940 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.976017 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.976032 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.976050 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:07 crc kubenswrapper[4840]: I1209 16:58:07.976065 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:07Z","lastTransitionTime":"2025-12-09T16:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.078847 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.079209 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.079358 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.079503 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.079639 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.183116 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.183167 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.183178 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.183196 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.183209 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.285331 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.285411 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.285434 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.285464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.285487 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.388207 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.388480 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.388553 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.388639 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.388733 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.494684 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.494890 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.494899 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.494913 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.494924 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.597779 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.597824 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.597836 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.597852 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.597866 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.700075 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.700347 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.700432 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.700530 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.700625 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.803741 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.804044 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.804233 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.804384 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.804521 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.866172 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.866563 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.866725 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.866861 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.867031 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.887846 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:08Z is after 
2025-08-24T17:21:41Z" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.892152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.892360 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.892487 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.892664 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.892797 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.911171 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:08Z is after 
2025-08-24T17:21:41Z" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.915711 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.915900 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.916065 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.916223 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.916385 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.932904 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:08Z is after 
2025-08-24T17:21:41Z" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.937613 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.937663 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.937680 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.937703 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.937720 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.958797 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:08Z is after 
2025-08-24T17:21:41Z" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.963875 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.963911 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.963923 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.963939 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.963950 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.984957 4840 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d7e6f5e-3bc8-4940-b935-65e21247c851\\\",\\\"systemUUID\\\":\\\"996aaa93-f1e3-43a6-a427-94b00d03e134\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:08Z is after 
2025-08-24T17:21:41Z" Dec 09 16:58:08 crc kubenswrapper[4840]: E1209 16:58:08.985345 4840 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.987094 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.987174 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.987225 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.987249 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:08 crc kubenswrapper[4840]: I1209 16:58:08.987266 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:08Z","lastTransitionTime":"2025-12-09T16:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.089935 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.090014 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.090046 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.090064 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.090075 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.192738 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.193720 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.193889 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.194135 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.194324 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.297244 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.297310 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.297331 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.297358 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.297377 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.400000 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.400097 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.400126 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.400156 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.400175 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.502720 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.502766 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.502781 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.502801 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.502816 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.605597 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.605657 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.605674 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.605697 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.605714 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.607785 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.607865 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.608206 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:58:09 crc kubenswrapper[4840]: E1209 16:58:09.608370 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.608413 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:09 crc kubenswrapper[4840]: E1209 16:58:09.608801 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:09 crc kubenswrapper[4840]: E1209 16:58:09.608838 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:09 crc kubenswrapper[4840]: E1209 16:58:09.608890 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.707764 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.707803 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.707815 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.707832 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.707843 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.810711 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.810761 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.810775 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.810794 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.810806 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.913194 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.913237 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.913245 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.913275 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:09 crc kubenswrapper[4840]: I1209 16:58:09.913284 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:09Z","lastTransitionTime":"2025-12-09T16:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.015908 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.015944 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.015955 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.015990 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.016002 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.118815 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.118857 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.118868 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.118883 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.118895 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.221732 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.222348 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.222443 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.222547 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.222634 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.325876 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.325934 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.325957 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.326013 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.326032 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.427842 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.428178 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.428291 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.428393 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.428483 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.531423 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.531491 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.531507 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.531524 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.531536 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.633893 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.633938 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.633951 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.633981 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.633992 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.735925 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.736222 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.736361 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.736461 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.736574 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.838631 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.838676 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.838686 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.838702 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.838711 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.941394 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.941760 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.941939 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.942136 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:10 crc kubenswrapper[4840]: I1209 16:58:10.942518 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:10Z","lastTransitionTime":"2025-12-09T16:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.046372 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.046418 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.046430 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.046447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.046460 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.148976 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.149010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.149022 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.149040 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.149051 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.251598 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.251647 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.251661 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.251680 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.251695 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.354023 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.354057 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.354067 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.354083 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.354094 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.458239 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.458310 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.458334 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.458364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.458385 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.561190 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.561265 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.561291 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.561317 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.561335 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.607509 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.607565 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq"
Dec 09 16:58:11 crc kubenswrapper[4840]: E1209 16:58:11.607661 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.607685 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:11 crc kubenswrapper[4840]: E1209 16:58:11.607821 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.607888 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 16:58:11 crc kubenswrapper[4840]: E1209 16:58:11.608004 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 16:58:11 crc kubenswrapper[4840]: E1209 16:58:11.608096 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.664113 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.664177 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.664194 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.664217 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.664238 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.767001 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.767351 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.767481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.767609 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.767739 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.871547 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.872128 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.872334 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.872525 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.872715 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.976814 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.976879 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.976896 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.976922 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:11 crc kubenswrapper[4840]: I1209 16:58:11.976939 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:11Z","lastTransitionTime":"2025-12-09T16:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.080119 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.080184 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.080204 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.080228 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.080246 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.184018 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.184896 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.185118 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.185299 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.185445 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.288135 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.288961 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.289149 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.289301 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.289459 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.392500 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.392574 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.392601 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.392630 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.392652 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.495438 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.495507 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.495532 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.495559 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.495582 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.598629 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.598710 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.598736 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.598764 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.598792 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.609515 4840 scope.go:117] "RemoveContainer" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.701603 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.702285 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.702412 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.702538 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.702681 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.806543 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.806927 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.807084 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.807211 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.807341 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.910989 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.911248 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.911374 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.911476 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:12 crc kubenswrapper[4840]: I1209 16:58:12.911577 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:12Z","lastTransitionTime":"2025-12-09T16:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.013452 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.013491 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.013508 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.013533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.013551 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.069835 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/2.log" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.073761 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.091038 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.106204 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.115370 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.115406 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.115418 4840 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.115434 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.115446 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.118339 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.131282 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.141637 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.153174 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.168413 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.185578 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.197119 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.209428 4840 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.217416 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.217464 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 
16:58:13.217481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.217501 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.217516 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.224152 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e
6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.241991 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.265450 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:04Z\\\",\\\"message\\\":\\\"2025-12-09T16:57:19+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd\\\\n2025-12-09T16:57:19+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd to /host/opt/cni/bin/\\\\n2025-12-09T16:57:19Z [verbose] multus-daemon started\\\\n2025-12-09T16:57:19Z [verbose] Readiness Indicator file check\\\\n2025-12-09T16:58:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.287053 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.297578 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.317393 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6fb10cb6e6690411d4c1ad92ae3018a939a582
4c0a9419848b2daa40267852\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 
obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.319114 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.319141 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.319152 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.319165 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.319174 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.328242 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:13Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.423772 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.423804 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.423813 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.423828 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.423842 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.526455 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.526706 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.526778 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.526844 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.526919 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.608502 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.608618 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.608636 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:13 crc kubenswrapper[4840]: E1209 16:58:13.608767 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:13 crc kubenswrapper[4840]: E1209 16:58:13.608825 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:13 crc kubenswrapper[4840]: E1209 16:58:13.608894 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.608952 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:13 crc kubenswrapper[4840]: E1209 16:58:13.609139 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.629904 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.629998 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.630019 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.630045 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.630063 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.732039 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.732079 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.732091 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.732107 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.732119 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.834663 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.834717 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.834735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.834760 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.834778 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.937820 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.937884 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.937900 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.937924 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:13 crc kubenswrapper[4840]: I1209 16:58:13.937942 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:13Z","lastTransitionTime":"2025-12-09T16:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.040883 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.040946 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.040988 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.041012 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.041029 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.079899 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/3.log" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.080668 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/2.log" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.085411 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" exitCode=1 Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.085438 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.085516 4840 scope.go:117] "RemoveContainer" containerID="eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.087027 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" Dec 09 16:58:14 crc kubenswrapper[4840]: E1209 16:58:14.087658 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.103419 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.120951 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.140923 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.144498 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.144544 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.144558 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.144574 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.144586 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.165157 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.185585 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.200928 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.218228 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.239616 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.250838 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.250873 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.250905 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.250927 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.250937 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.261669 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.278786 4840 status_manager.go:875] "Failed to update status for pod"
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.290522 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.299644 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.309490 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:04Z\\\",\\\"message\\\":\\\"2025-12-09T16:57:19+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd\\\\n2025-12-09T16:57:19+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd to /host/opt/cni/bin/\\\\n2025-12-09T16:57:19Z [verbose] multus-daemon started\\\\n2025-12-09T16:57:19Z [verbose] Readiness Indicator file check\\\\n2025-12-09T16:58:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.322063 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.333177 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.345452 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.353643 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.353710 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.353722 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.353740 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.353801 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.364048 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 
obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:13Z\\\",\\\"message\\\":\\\"pi-operator-machine-webhook on namespace openshift-machine-api for network=default : 11.802101ms\\\\nI1209 16:58:13.696628 6860 egressservice_zone_endpointslice.go:80] Ignoring updating openshift-kube-apiserver/apiserver for endpointslice openshift-kube-apiserver/apiserver-r277x as it is not a known egress service\\\\nI1209 16:58:13.696693 6860 egressservice_zone_node.go:110] Processing sync for Egress Service node crc\\\\nI1209 16:58:13.696718 6860 egressservice_zone_node.go:113] Finished syncing Egress Service node crc: 30.171µs\\\\nI1209 16:58:13.696762 6860 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697102 6860 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697239 6860 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697728 6860 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:58:13.697800 6860 factory.go:656] Stopping watch factory\\\\nI1209 16:58:13.697822 6860 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:58:13.697855 6860 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1209 16:58:13.697870 6860 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 
16:58:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.456567 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.456630 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.456651 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.456676 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.456695 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.559621 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.559676 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.559692 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.559713 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.559730 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.628540 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4b0e7fcd-e3db-4b2f-b0b0-7800c18b3abb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7749be321a920bec071d4eaa088051a73b6a4337162342c5cd9b3ce68c483d10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdce315b9b993e9decc1b8f96634ceb63471f2287cd1d4859e26e7b37924df5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-sched
uler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b40a9b19ba16f8f7e6b72c162d1792bb6cd1460e41c5911dd032ab8d673329f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b93f02e165c5aa5e8464919b03294cbc35c54be4b4510cef6888547ed2a80054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.645887 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.662514 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.662544 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.662555 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.662569 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.662580 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.668855 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ad5e84b-9a8c-4644-9327-66c2170ffa58\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e928f1c61ce5da1ff0c275d2b4d0288ffd4950bbcf9a88c65b607a5c096440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29221918f10d695507de6d6145532431e0c0cdaab13cfafa3567ca19c46680ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91a0399c3deb65e684a944be7f68b50b6a3d6eaaf8369347fcd3a8ee99b7dd9e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b76b142a86a7a6c188c24bf80cb4da4314fd825c963b6cb29433aaed5eccc87f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc8b9635cdf5fdd46297f95708075bf2d25a15e0641c3c8ab20b384ad6e0311c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f8b8e1d0eb4511eb866d14715a4e10caaa02918a2ee2844aaf8d633bac8cecd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fc8f0f98ffb12a0593d66facfd5599a7f18dcefefa59b92f366d3a76ac40df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxdfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fnwb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.685266 4840 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe6d320b-3a64-4724-93af-500d38c77974\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c79a9c73635f907289cb73e3f330cf4a8ec24e1b9ec441e87a9dffaa73652a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w2qm4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kr6l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.703631 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2099e918-a035-4659-8247-971e3e59c6ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vk4ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hc4xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.722035 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49bad4c7d257281998367b2917fb40e3ce92b1920a310a335a23ed1672a02b3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.740235 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n2cr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c465ec1-5011-46d7-bcf3-df79d8b4543b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:04Z\\\",\\\"message\\\":\\\"2025-12-09T16:57:19+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd\\\\n2025-12-09T16:57:19+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5e81698f-3288-4b31-8541-8c6348cf0fdd to /host/opt/cni/bin/\\\\n2025-12-09T16:57:19Z [verbose] multus-daemon started\\\\n2025-12-09T16:57:19Z [verbose] Readiness Indicator file check\\\\n2025-12-09T16:58:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vtlb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n2cr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.756748 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3dbfc171-948b-45bd-a6de-d5d99b4dfcdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a290b82524f2d0ac25b5053a33ab8857be31228a23574becb362cffe72602b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b33004bbebaacd5688565d23dd82b5ad1aa79700f5aceb239dd97a3c13b97d16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4961911836419fa76b9b54d3fe9bc048fbd91689130faf999a7f4d7afeb7a20\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.765610 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.765684 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.765707 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.765735 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.765753 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.776627 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700088c2-3a1d-468f-adf9-91f489a11014\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1209 16:57:07.245468 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 16:57:07.246572 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-586632345/tls.crt::/tmp/serving-cert-586632345/tls.key\\\\\\\"\\\\nI1209 16:57:12.963820 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 16:57:12.973098 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 16:57:12.974275 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 16:57:12.974370 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 16:57:12.974402 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 16:57:12.985527 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 16:57:12.985553 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 16:57:12.985574 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 16:57:12.985588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 16:57:12.985591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 16:57:12.985595 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 16:57:12.985599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 16:57:12.987948 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:56:56Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:56:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:56:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:56:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.793319 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.821787 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33826d17-3660-4069-b173-accfbe7e24b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6fb10cb6e6690411d4c1ad92ae3018a939a582
4c0a9419848b2daa40267852\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2aaacc9563bdbfc0f4700958ad63c94b3e11c814d924eedf35aafa17be3f32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:57:46Z\\\",\\\"message\\\":\\\"ect:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.37\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1209 16:57:46.455662 6465 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-authentication-operator/metrics]} name:Service_openshift-authentication-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.150:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6ea1fd71-2b40-4361-92ee-3f1ab4ec7414}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 16:57:46.455761 6465 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1209 16:57:46.455774 6465 obj_retry.g\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T16:58:13Z\\\",\\\"message\\\":\\\"pi-operator-machine-webhook on namespace openshift-machine-api for network=default : 11.802101ms\\\\nI1209 16:58:13.696628 6860 egressservice_zone_endpointslice.go:80] Ignoring updating openshift-kube-apiserver/apiserver for endpointslice openshift-kube-apiserver/apiserver-r277x as it is not a known egress service\\\\nI1209 16:58:13.696693 6860 egressservice_zone_node.go:110] Processing sync for Egress Service node crc\\\\nI1209 16:58:13.696718 6860 egressservice_zone_node.go:113] Finished syncing Egress Service node crc: 30.171µs\\\\nI1209 16:58:13.696762 6860 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697102 6860 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697239 6860 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1209 16:58:13.697728 6860 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1209 16:58:13.697800 6860 factory.go:656] Stopping watch factory\\\\nI1209 
16:58:13.697822 6860 ovnkube.go:599] Stopped ovnkube\\\\nI1209 16:58:13.697855 6860 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1209 16:58:13.697870 6860 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 16:58:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T16:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hos
tIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5gm99\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lpfl9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.840242 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cea6f6f70c6f0358f6a9c56af787e515f745d2dc653908ac74608a46c76aa6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.859763 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e22cc4c691b64ec52e2bb33c805894ae3c0561a6dbd2384f82324fe57710a1ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b42d56d36626c90ab0e322513a541ca7678a55c617988814cd644a39fbc6feec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.869533 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.869584 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.869602 4840 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.869626 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.869646 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.878399 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.894407 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tdmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f552c10c-4afe-437f-88f7-09946da0d260\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4b8d4d3d6e892145e052b8ef99d072538b2eec4d1c41cb9b42f8edb3c718de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-twbtv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tdmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.910325 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4t29t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbca946d-b14a-4c23-b383-813d02937eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://151840b42e62db5aae3fc9a74ccf139109a8d421575efac56c001843cc6efaff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjmqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4t29t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.927139 4840 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2ad4cd0-9c34-423b-937d-2856dcbbc640\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T16:57:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7720fea3ec3e5e9c9e7016c124e7d8aa9e0e9e500f2e2b17e95140c739c6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39ddfcd51339e4c5bc831ccf6dbc29bca1ee1891e1483783c301b2167542d120\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T16:57:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbngm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T16:57:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-zxql8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T16:58:14Z is after 2025-08-24T17:21:41Z" Dec 09 
16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.973332 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.973375 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.973388 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.973404 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:14 crc kubenswrapper[4840]: I1209 16:58:14.973416 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:14Z","lastTransitionTime":"2025-12-09T16:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.075868 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.075930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.075953 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.076021 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.076047 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.091855 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/3.log" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.178905 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.179006 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.179032 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.179061 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.179083 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.282168 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.282241 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.282261 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.282284 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.282301 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.385409 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.385483 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.385509 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.385537 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.385559 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.489566 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.489660 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.489679 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.489707 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.489726 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.593137 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.593223 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.593244 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.593269 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.593288 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.608053 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.608124 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.608076 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:15 crc kubenswrapper[4840]: E1209 16:58:15.608274 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.608333 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:15 crc kubenswrapper[4840]: E1209 16:58:15.608388 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:15 crc kubenswrapper[4840]: E1209 16:58:15.608586 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:15 crc kubenswrapper[4840]: E1209 16:58:15.608675 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.696600 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.696652 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.696664 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.696681 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.696694 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.799597 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.799654 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.799670 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.799690 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.799705 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.902123 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.902194 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.902212 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.902238 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:15 crc kubenswrapper[4840]: I1209 16:58:15.902255 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:15Z","lastTransitionTime":"2025-12-09T16:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.005340 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.005405 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.005421 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.005448 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.005465 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.108281 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.108347 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.108364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.108388 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.108406 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.211087 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.211144 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.211157 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.211172 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.211182 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.313684 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.313736 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.313753 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.313776 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.313793 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.416934 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.417047 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.417073 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.417105 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.417124 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.519792 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.519865 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.519888 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.519913 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.519932 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.623022 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.623083 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.623109 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.623138 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.623159 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.725607 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.725669 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.725688 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.725712 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.725729 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.827820 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.827883 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.827898 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.827920 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.827935 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.931806 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.931856 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.931869 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.931886 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:16 crc kubenswrapper[4840]: I1209 16:58:16.931900 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:16Z","lastTransitionTime":"2025-12-09T16:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.034923 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.035016 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.035052 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.035153 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.035236 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.137586 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.137628 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.137637 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.137651 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.137662 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.241017 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.241109 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.241144 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.241179 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.241205 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.344149 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.344206 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.344227 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.344250 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.344268 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.449683 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.450319 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.450343 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.450367 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.450392 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.517852 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.518078 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:21.518039726 +0000 UTC m=+147.509150389 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.518143 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.518197 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.518382 4840 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.518445 4840 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.518463 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:59:21.518444018 +0000 UTC m=+147.509554691 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.518593 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 16:59:21.518558092 +0000 UTC m=+147.509668765 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
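
[editor's note] The TearDown failure above is node-local: kubevirt.io.hostpath-provisioner has not re-registered with this kubelet over the plugin-registration socket after the restart, so the unmount is re-queued with a 1m4s backoff. The per-node registration list is mirrored in the CSINode object; a sketch under the same assumptions as above:

```python
# Sketch: list the CSI drivers registered on this node. The kubelet error
# above comes from its in-memory registration list; the CSINode object
# mirrors what has registered via the plugin-registration socket.
# Node name "crc" is from the log above.
from kubernetes import client, config

config.load_kube_config()
storage = client.StorageV1Api()

csinode = storage.read_csi_node("crc")
drivers = [d.name for d in (csinode.spec.drivers or [])]
print("drivers registered on node:", drivers)
print("kubevirt.io.hostpath-provisioner present:",
      "kubevirt.io.hostpath-provisioner" in drivers)
```
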
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.553403 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.553458 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.553481 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.553510 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.553533 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.608468 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.608491 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.608499 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.608546 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.609382 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.609470 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.609595 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.609837 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.619114 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.619216 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619298 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619335 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619373 4840 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619419 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619444 4840 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619462 4840 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not 
registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619482 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 16:59:21.619447836 +0000 UTC m=+147.610558509 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:58:17 crc kubenswrapper[4840]: E1209 16:58:17.619534 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 16:59:21.619508838 +0000 UTC m=+147.610619501 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.656816 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.656895 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.656910 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.656930 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.656945 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.760280 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.760343 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.760363 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.760389 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.760407 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.863374 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.863447 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.863469 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.863497 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.863520 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.965840 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.966010 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.966038 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.966072 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:17 crc kubenswrapper[4840]: I1209 16:58:17.966094 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:17Z","lastTransitionTime":"2025-12-09T16:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.069442 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.069530 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.069555 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.069582 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.069599 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.172364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.172757 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.172910 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.173112 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.173358 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.276215 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.276567 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.277043 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.277428 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.277775 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.381523 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.381611 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.381682 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.381726 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.381748 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.484680 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.484784 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.484816 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.484846 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.484867 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.588239 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.588419 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.588444 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.588508 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.588528 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.691465 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.691537 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.691556 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.691584 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.691603 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.794945 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.795014 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.795029 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.795077 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.795090 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.898319 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.898364 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.898377 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.898396 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:18 crc kubenswrapper[4840]: I1209 16:58:18.898409 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:18Z","lastTransitionTime":"2025-12-09T16:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.000799 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.000825 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.000832 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.000844 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.000854 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:19Z","lastTransitionTime":"2025-12-09T16:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.104499 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.104552 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.104568 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.104592 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.104608 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:19Z","lastTransitionTime":"2025-12-09T16:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.207951 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.208035 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.208054 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.208080 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.208101 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:19Z","lastTransitionTime":"2025-12-09T16:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.228478 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.228573 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.228643 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.228677 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.228744 4840 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T16:58:19Z","lastTransitionTime":"2025-12-09T16:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.303279 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv"] Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.303784 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.306196 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.307049 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.307187 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.308191 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.373595 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-tdmlx" podStartSLOduration=62.373576327 podStartE2EDuration="1m2.373576327s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.351259304 +0000 UTC m=+85.342369967" watchObservedRunningTime="2025-12-09 16:58:19.373576327 +0000 UTC m=+85.364686970" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.385741 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-4t29t" podStartSLOduration=62.385715978 podStartE2EDuration="1m2.385715978s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.373823834 +0000 UTC m=+85.364934477" watchObservedRunningTime="2025-12-09 16:58:19.385715978 +0000 UTC m=+85.376826631" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 
16:58:19.398524 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zxql8" podStartSLOduration=61.398500938 podStartE2EDuration="1m1.398500938s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.386297105 +0000 UTC m=+85.377407748" watchObservedRunningTime="2025-12-09 16:58:19.398500938 +0000 UTC m=+85.389611591" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.441569 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6ff2242-eda5-4c50-a61b-2e5f031a149a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.441603 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.441641 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f6ff2242-eda5-4c50-a61b-2e5f031a149a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.441675 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f6ff2242-eda5-4c50-a61b-2e5f031a149a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.441696 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.449597 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podStartSLOduration=62.449566519 podStartE2EDuration="1m2.449566519s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.449379734 +0000 UTC m=+85.440490427" watchObservedRunningTime="2025-12-09 16:58:19.449566519 +0000 UTC m=+85.440677192" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.477148 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=30.477122112 podStartE2EDuration="30.477122112s" podCreationTimestamp="2025-12-09 16:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.47705275 +0000 UTC m=+85.468163393" watchObservedRunningTime="2025-12-09 16:58:19.477122112 +0000 UTC m=+85.468232745" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.524992 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-fnwb4" podStartSLOduration=62.524944104 podStartE2EDuration="1m2.524944104s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.524704326 +0000 UTC m=+85.515814979" watchObservedRunningTime="2025-12-09 16:58:19.524944104 +0000 UTC m=+85.516054757" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542463 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f6ff2242-eda5-4c50-a61b-2e5f031a149a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542532 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f6ff2242-eda5-4c50-a61b-2e5f031a149a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542562 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542588 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6ff2242-eda5-4c50-a61b-2e5f031a149a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542612 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.542691 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.543114 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f6ff2242-eda5-4c50-a61b-2e5f031a149a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.544007 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f6ff2242-eda5-4c50-a61b-2e5f031a149a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.552758 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6ff2242-eda5-4c50-a61b-2e5f031a149a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.575108 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f6ff2242-eda5-4c50-a61b-2e5f031a149a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnhkv\" (UID: \"f6ff2242-eda5-4c50-a61b-2e5f031a149a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.578707 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-n2cr9" podStartSLOduration=62.578688537 podStartE2EDuration="1m2.578688537s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.578119739 +0000 UTC m=+85.569230372" watchObservedRunningTime="2025-12-09 16:58:19.578688537 +0000 UTC m=+85.569799180" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.607743 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.607817 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.607891 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:19 crc kubenswrapper[4840]: E1209 16:58:19.607902 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.607767 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:19 crc kubenswrapper[4840]: E1209 16:58:19.608060 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:19 crc kubenswrapper[4840]: E1209 16:58:19.608165 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:19 crc kubenswrapper[4840]: E1209 16:58:19.608224 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.626243 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" Dec 09 16:58:19 crc kubenswrapper[4840]: W1209 16:58:19.639115 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6ff2242_eda5_4c50_a61b_2e5f031a149a.slice/crio-5991afd0d065dda497fa7ef9d0fe60edd27b4b88d9d1b6f5a7ce96f07d66581c WatchSource:0}: Error finding container 5991afd0d065dda497fa7ef9d0fe60edd27b4b88d9d1b6f5a7ce96f07d66581c: Status 404 returned error can't find the container with id 5991afd0d065dda497fa7ef9d0fe60edd27b4b88d9d1b6f5a7ce96f07d66581c Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.651356 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=66.651339187 podStartE2EDuration="1m6.651339187s" podCreationTimestamp="2025-12-09 16:57:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.631083768 +0000 UTC m=+85.622194401" watchObservedRunningTime="2025-12-09 16:58:19.651339187 +0000 UTC m=+85.642449820" Dec 09 16:58:19 crc kubenswrapper[4840]: I1209 16:58:19.669229 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=66.669211594 podStartE2EDuration="1m6.669211594s" podCreationTimestamp="2025-12-09 16:57:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:19.651779301 +0000 UTC m=+85.642889934" watchObservedRunningTime="2025-12-09 16:58:19.669211594 +0000 UTC m=+85.660322227" Dec 09 16:58:20 crc kubenswrapper[4840]: I1209 16:58:20.114399 4840 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" event={"ID":"f6ff2242-eda5-4c50-a61b-2e5f031a149a","Type":"ContainerStarted","Data":"308d0c8cc573c562e79f80bf3d31a4b22a64161c11049b4a54da91415c1aabf8"} Dec 09 16:58:20 crc kubenswrapper[4840]: I1209 16:58:20.114478 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" event={"ID":"f6ff2242-eda5-4c50-a61b-2e5f031a149a","Type":"ContainerStarted","Data":"5991afd0d065dda497fa7ef9d0fe60edd27b4b88d9d1b6f5a7ce96f07d66581c"} Dec 09 16:58:20 crc kubenswrapper[4840]: I1209 16:58:20.136679 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnhkv" podStartSLOduration=63.136655502 podStartE2EDuration="1m3.136655502s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:20.135476526 +0000 UTC m=+86.126587209" watchObservedRunningTime="2025-12-09 16:58:20.136655502 +0000 UTC m=+86.127766165" Dec 09 16:58:21 crc kubenswrapper[4840]: I1209 16:58:21.607912 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:21 crc kubenswrapper[4840]: I1209 16:58:21.608184 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:21 crc kubenswrapper[4840]: I1209 16:58:21.608389 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:21 crc kubenswrapper[4840]: E1209 16:58:21.608384 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:21 crc kubenswrapper[4840]: I1209 16:58:21.608467 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:21 crc kubenswrapper[4840]: E1209 16:58:21.608804 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:21 crc kubenswrapper[4840]: E1209 16:58:21.608906 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:21 crc kubenswrapper[4840]: E1209 16:58:21.609002 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:23 crc kubenswrapper[4840]: I1209 16:58:23.608016 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:23 crc kubenswrapper[4840]: I1209 16:58:23.608134 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:23 crc kubenswrapper[4840]: I1209 16:58:23.608178 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:23 crc kubenswrapper[4840]: E1209 16:58:23.608336 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:23 crc kubenswrapper[4840]: I1209 16:58:23.608384 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:23 crc kubenswrapper[4840]: E1209 16:58:23.608523 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:23 crc kubenswrapper[4840]: E1209 16:58:23.608670 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:23 crc kubenswrapper[4840]: E1209 16:58:23.608736 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:25 crc kubenswrapper[4840]: I1209 16:58:25.608551 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:25 crc kubenswrapper[4840]: I1209 16:58:25.608620 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:25 crc kubenswrapper[4840]: I1209 16:58:25.608620 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:25 crc kubenswrapper[4840]: I1209 16:58:25.608712 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:25 crc kubenswrapper[4840]: E1209 16:58:25.608899 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:25 crc kubenswrapper[4840]: E1209 16:58:25.609604 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:25 crc kubenswrapper[4840]: E1209 16:58:25.609719 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:25 crc kubenswrapper[4840]: E1209 16:58:25.609494 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:26 crc kubenswrapper[4840]: I1209 16:58:26.625211 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.607851 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.608141 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:27 crc kubenswrapper[4840]: E1209 16:58:27.608142 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.608216 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.608266 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:27 crc kubenswrapper[4840]: E1209 16:58:27.608441 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:27 crc kubenswrapper[4840]: E1209 16:58:27.608548 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.609065 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" Dec 09 16:58:27 crc kubenswrapper[4840]: E1209 16:58:27.609080 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:27 crc kubenswrapper[4840]: E1209 16:58:27.609389 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:58:27 crc kubenswrapper[4840]: I1209 16:58:27.624936 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=1.624410937 podStartE2EDuration="1.624410937s" podCreationTimestamp="2025-12-09 16:58:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:27.622744936 +0000 UTC m=+93.613855609" watchObservedRunningTime="2025-12-09 16:58:27.624410937 +0000 UTC m=+93.615521570" Dec 09 16:58:29 crc kubenswrapper[4840]: I1209 16:58:29.608058 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:29 crc kubenswrapper[4840]: I1209 16:58:29.608097 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:29 crc kubenswrapper[4840]: I1209 16:58:29.608244 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:29 crc kubenswrapper[4840]: E1209 16:58:29.608401 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:29 crc kubenswrapper[4840]: I1209 16:58:29.608463 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:29 crc kubenswrapper[4840]: E1209 16:58:29.608534 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:29 crc kubenswrapper[4840]: E1209 16:58:29.608667 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:29 crc kubenswrapper[4840]: E1209 16:58:29.608840 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:31 crc kubenswrapper[4840]: I1209 16:58:31.607939 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:31 crc kubenswrapper[4840]: I1209 16:58:31.608096 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:31 crc kubenswrapper[4840]: I1209 16:58:31.608115 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:31 crc kubenswrapper[4840]: I1209 16:58:31.608201 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:31 crc kubenswrapper[4840]: E1209 16:58:31.608332 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:31 crc kubenswrapper[4840]: E1209 16:58:31.608640 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:31 crc kubenswrapper[4840]: E1209 16:58:31.608850 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:31 crc kubenswrapper[4840]: E1209 16:58:31.609110 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:32 crc kubenswrapper[4840]: I1209 16:58:32.358058 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:58:32 crc kubenswrapper[4840]: I1209 16:58:32.359171 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" Dec 09 16:58:32 crc kubenswrapper[4840]: E1209 16:58:32.359359 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:58:33 crc kubenswrapper[4840]: I1209 16:58:33.608088 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:33 crc kubenswrapper[4840]: I1209 16:58:33.608114 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:33 crc kubenswrapper[4840]: I1209 16:58:33.608122 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:33 crc kubenswrapper[4840]: E1209 16:58:33.608252 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:33 crc kubenswrapper[4840]: I1209 16:58:33.608366 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:33 crc kubenswrapper[4840]: E1209 16:58:33.608547 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:33 crc kubenswrapper[4840]: E1209 16:58:33.608648 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:33 crc kubenswrapper[4840]: E1209 16:58:33.608748 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:33 crc kubenswrapper[4840]: I1209 16:58:33.632949 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 09 16:58:34 crc kubenswrapper[4840]: I1209 16:58:34.653004 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=1.652938486 podStartE2EDuration="1.652938486s" podCreationTimestamp="2025-12-09 16:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:58:34.651905275 +0000 UTC m=+100.643015948" watchObservedRunningTime="2025-12-09 16:58:34.652938486 +0000 UTC m=+100.644049159" Dec 09 16:58:35 crc kubenswrapper[4840]: I1209 16:58:35.607956 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:35 crc kubenswrapper[4840]: I1209 16:58:35.608040 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:35 crc kubenswrapper[4840]: I1209 16:58:35.608008 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:35 crc kubenswrapper[4840]: I1209 16:58:35.608018 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:35 crc kubenswrapper[4840]: E1209 16:58:35.608201 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:35 crc kubenswrapper[4840]: E1209 16:58:35.608354 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:35 crc kubenswrapper[4840]: E1209 16:58:35.608486 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:35 crc kubenswrapper[4840]: E1209 16:58:35.608605 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:36 crc kubenswrapper[4840]: I1209 16:58:36.419824 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:36 crc kubenswrapper[4840]: E1209 16:58:36.420054 4840 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:58:36 crc kubenswrapper[4840]: E1209 16:58:36.420183 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs podName:2099e918-a035-4659-8247-971e3e59c6ef nodeName:}" failed. No retries permitted until 2025-12-09 16:59:40.420154757 +0000 UTC m=+166.411265420 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs") pod "network-metrics-daemon-hc4xq" (UID: "2099e918-a035-4659-8247-971e3e59c6ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 16:58:37 crc kubenswrapper[4840]: I1209 16:58:37.607689 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:37 crc kubenswrapper[4840]: E1209 16:58:37.608812 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:37 crc kubenswrapper[4840]: I1209 16:58:37.607752 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:37 crc kubenswrapper[4840]: I1209 16:58:37.607815 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:37 crc kubenswrapper[4840]: E1209 16:58:37.609251 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:37 crc kubenswrapper[4840]: E1209 16:58:37.609395 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:37 crc kubenswrapper[4840]: I1209 16:58:37.607705 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:37 crc kubenswrapper[4840]: E1209 16:58:37.609555 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:39 crc kubenswrapper[4840]: I1209 16:58:39.608155 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:39 crc kubenswrapper[4840]: I1209 16:58:39.608226 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:39 crc kubenswrapper[4840]: I1209 16:58:39.608284 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:39 crc kubenswrapper[4840]: I1209 16:58:39.608165 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:39 crc kubenswrapper[4840]: E1209 16:58:39.608364 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:39 crc kubenswrapper[4840]: E1209 16:58:39.608527 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:39 crc kubenswrapper[4840]: E1209 16:58:39.608596 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:39 crc kubenswrapper[4840]: E1209 16:58:39.608693 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:41 crc kubenswrapper[4840]: I1209 16:58:41.608119 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:41 crc kubenswrapper[4840]: I1209 16:58:41.608209 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:41 crc kubenswrapper[4840]: I1209 16:58:41.608143 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:41 crc kubenswrapper[4840]: I1209 16:58:41.608256 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:41 crc kubenswrapper[4840]: E1209 16:58:41.608349 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:41 crc kubenswrapper[4840]: E1209 16:58:41.608424 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:41 crc kubenswrapper[4840]: E1209 16:58:41.608516 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:41 crc kubenswrapper[4840]: E1209 16:58:41.608579 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:43 crc kubenswrapper[4840]: I1209 16:58:43.608107 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:43 crc kubenswrapper[4840]: I1209 16:58:43.608363 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:43 crc kubenswrapper[4840]: I1209 16:58:43.608452 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:43 crc kubenswrapper[4840]: E1209 16:58:43.608538 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:43 crc kubenswrapper[4840]: I1209 16:58:43.608569 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:43 crc kubenswrapper[4840]: E1209 16:58:43.608693 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:43 crc kubenswrapper[4840]: E1209 16:58:43.608908 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:43 crc kubenswrapper[4840]: E1209 16:58:43.609103 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:45 crc kubenswrapper[4840]: I1209 16:58:45.608111 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:45 crc kubenswrapper[4840]: I1209 16:58:45.608184 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:45 crc kubenswrapper[4840]: E1209 16:58:45.608853 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:45 crc kubenswrapper[4840]: I1209 16:58:45.608232 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:45 crc kubenswrapper[4840]: I1209 16:58:45.608234 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:45 crc kubenswrapper[4840]: E1209 16:58:45.609003 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:45 crc kubenswrapper[4840]: E1209 16:58:45.609205 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:45 crc kubenswrapper[4840]: E1209 16:58:45.609262 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:47 crc kubenswrapper[4840]: I1209 16:58:47.608752 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:47 crc kubenswrapper[4840]: I1209 16:58:47.608787 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:47 crc kubenswrapper[4840]: I1209 16:58:47.608929 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:47 crc kubenswrapper[4840]: E1209 16:58:47.609611 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:47 crc kubenswrapper[4840]: I1209 16:58:47.609714 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:47 crc kubenswrapper[4840]: I1209 16:58:47.609804 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" Dec 09 16:58:47 crc kubenswrapper[4840]: E1209 16:58:47.609790 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:47 crc kubenswrapper[4840]: E1209 16:58:47.609870 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:47 crc kubenswrapper[4840]: E1209 16:58:47.610021 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lpfl9_openshift-ovn-kubernetes(33826d17-3660-4069-b173-accfbe7e24b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" Dec 09 16:58:47 crc kubenswrapper[4840]: E1209 16:58:47.610138 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:49 crc kubenswrapper[4840]: I1209 16:58:49.608217 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:49 crc kubenswrapper[4840]: I1209 16:58:49.608295 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:49 crc kubenswrapper[4840]: I1209 16:58:49.608217 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:49 crc kubenswrapper[4840]: I1209 16:58:49.608399 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:49 crc kubenswrapper[4840]: E1209 16:58:49.608464 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:49 crc kubenswrapper[4840]: E1209 16:58:49.608541 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:49 crc kubenswrapper[4840]: E1209 16:58:49.608697 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:49 crc kubenswrapper[4840]: E1209 16:58:49.608778 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.233028 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/1.log" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.233861 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/0.log" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.233942 4840 generic.go:334] "Generic (PLEG): container finished" podID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" containerID="cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786" exitCode=1 Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.234015 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerDied","Data":"cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786"} Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.234068 4840 scope.go:117] "RemoveContainer" containerID="973b6c4ec8a8960fe73316008c5564c5ac7a20a9cde72fb89ec527e77f4ea5d9" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.234635 4840 scope.go:117] "RemoveContainer" containerID="cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786" Dec 09 16:58:51 crc kubenswrapper[4840]: E1209 16:58:51.234935 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-n2cr9_openshift-multus(9c465ec1-5011-46d7-bcf3-df79d8b4543b)\"" pod="openshift-multus/multus-n2cr9" podUID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.608508 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.608557 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.608616 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:51 crc kubenswrapper[4840]: I1209 16:58:51.608525 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:51 crc kubenswrapper[4840]: E1209 16:58:51.608698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:51 crc kubenswrapper[4840]: E1209 16:58:51.608854 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:51 crc kubenswrapper[4840]: E1209 16:58:51.608942 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:51 crc kubenswrapper[4840]: E1209 16:58:51.609121 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:52 crc kubenswrapper[4840]: I1209 16:58:52.239930 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/1.log" Dec 09 16:58:53 crc kubenswrapper[4840]: I1209 16:58:53.608004 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:53 crc kubenswrapper[4840]: I1209 16:58:53.608042 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:53 crc kubenswrapper[4840]: I1209 16:58:53.608235 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:53 crc kubenswrapper[4840]: E1209 16:58:53.608467 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:53 crc kubenswrapper[4840]: E1209 16:58:53.608603 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:53 crc kubenswrapper[4840]: E1209 16:58:53.608806 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:53 crc kubenswrapper[4840]: I1209 16:58:53.609179 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:53 crc kubenswrapper[4840]: E1209 16:58:53.609309 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:54 crc kubenswrapper[4840]: E1209 16:58:54.594134 4840 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 09 16:58:54 crc kubenswrapper[4840]: E1209 16:58:54.745061 4840 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 16:58:55 crc kubenswrapper[4840]: I1209 16:58:55.608277 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:55 crc kubenswrapper[4840]: I1209 16:58:55.608335 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:55 crc kubenswrapper[4840]: I1209 16:58:55.608349 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:55 crc kubenswrapper[4840]: I1209 16:58:55.608407 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:55 crc kubenswrapper[4840]: E1209 16:58:55.608603 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:55 crc kubenswrapper[4840]: E1209 16:58:55.608766 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:55 crc kubenswrapper[4840]: E1209 16:58:55.608915 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:55 crc kubenswrapper[4840]: E1209 16:58:55.609104 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:57 crc kubenswrapper[4840]: I1209 16:58:57.607756 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:57 crc kubenswrapper[4840]: I1209 16:58:57.607789 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:57 crc kubenswrapper[4840]: E1209 16:58:57.608021 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:57 crc kubenswrapper[4840]: I1209 16:58:57.608045 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:57 crc kubenswrapper[4840]: I1209 16:58:57.608081 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:57 crc kubenswrapper[4840]: E1209 16:58:57.608249 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:57 crc kubenswrapper[4840]: E1209 16:58:57.608422 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:57 crc kubenswrapper[4840]: E1209 16:58:57.608512 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:59 crc kubenswrapper[4840]: I1209 16:58:59.607531 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:58:59 crc kubenswrapper[4840]: I1209 16:58:59.607578 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:58:59 crc kubenswrapper[4840]: I1209 16:58:59.607652 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:58:59 crc kubenswrapper[4840]: I1209 16:58:59.607545 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:58:59 crc kubenswrapper[4840]: E1209 16:58:59.607715 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:58:59 crc kubenswrapper[4840]: E1209 16:58:59.607849 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:58:59 crc kubenswrapper[4840]: E1209 16:58:59.607937 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:58:59 crc kubenswrapper[4840]: E1209 16:58:59.608002 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:58:59 crc kubenswrapper[4840]: E1209 16:58:59.747846 4840 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 16:59:00 crc kubenswrapper[4840]: I1209 16:59:00.608778 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.277179 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/3.log" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.279824 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerStarted","Data":"0b264648af59c3553deaa61e50dd6d64c709043b7d9b08b6cb4a2bb87a665604"} Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.280238 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.321228 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podStartSLOduration=104.321213724 podStartE2EDuration="1m44.321213724s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:01.320064049 +0000 UTC m=+127.311174682" watchObservedRunningTime="2025-12-09 16:59:01.321213724 +0000 UTC m=+127.312324357" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.513362 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-hc4xq"] Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.513498 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:01 crc kubenswrapper[4840]: E1209 16:59:01.513606 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.608066 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.608140 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:01 crc kubenswrapper[4840]: E1209 16:59:01.608210 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:59:01 crc kubenswrapper[4840]: E1209 16:59:01.608314 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:59:01 crc kubenswrapper[4840]: I1209 16:59:01.608509 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:01 crc kubenswrapper[4840]: E1209 16:59:01.608592 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:59:02 crc kubenswrapper[4840]: I1209 16:59:02.608946 4840 scope.go:117] "RemoveContainer" containerID="cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786" Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.290279 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/1.log" Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.290696 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerStarted","Data":"f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315"} Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.607763 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.608132 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:03 crc kubenswrapper[4840]: E1209 16:59:03.608207 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.608245 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:03 crc kubenswrapper[4840]: E1209 16:59:03.608374 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hc4xq" podUID="2099e918-a035-4659-8247-971e3e59c6ef" Dec 09 16:59:03 crc kubenswrapper[4840]: E1209 16:59:03.608476 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 16:59:03 crc kubenswrapper[4840]: I1209 16:59:03.608799 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:03 crc kubenswrapper[4840]: E1209 16:59:03.609158 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.608301 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.608408 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.608406 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.608535 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.611504 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.611640 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.611732 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.612092 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.612168 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 09 16:59:05 crc kubenswrapper[4840]: I1209 16:59:05.612348 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.517809 4840 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.572034 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4whfq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.573026 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.573908 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.574604 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.578143 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.579262 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.581687 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.582775 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.583509 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.584472 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.585377 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.585955 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.586251 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.586391 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.587336 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.591399 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-g5dvm"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.592452 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.593785 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-thzxd"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.594543 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.596034 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.596905 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.598227 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.598943 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.599581 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8x5sv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.600268 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.602723 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.603133 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.614583 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.614615 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.614909 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.617064 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.617373 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-b82t4"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.617409 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.618099 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.618209 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.618790 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.620110 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.620571 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.622144 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.622381 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.625048 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.625355 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.625819 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.626489 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.626661 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.626870 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.626502 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.627287 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.627420 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.627921 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.628463 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.628876 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.629011 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.629116 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.629215 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.634391 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.634645 4840 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.634853 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.634870 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.634934 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.635217 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.635623 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.636489 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.636900 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.637237 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.641617 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.642270 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.645494 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.645948 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.646424 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.646936 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.647510 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.649988 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.650335 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663168 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 09 
16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663257 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663439 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663530 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663645 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663678 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663698 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663810 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.663893 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664001 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664015 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664095 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664210 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664270 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664388 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664419 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664633 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664678 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.664798 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.667039 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 09 16:59:10 crc 
kubenswrapper[4840]: I1209 16:59:10.667355 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.668195 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.668375 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-h5ltv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.670359 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.672160 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.675451 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.676770 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.677242 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.677568 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.677916 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.677936 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.678285 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.678878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.689557 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tf562"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.690240 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.696852 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.697436 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.697687 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.698049 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-ngfrq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.698485 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699296 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699307 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699441 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699463 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699560 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.700006 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.700039 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.699723 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.701406 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.700623 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.702057 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.702346 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.702384 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.702797 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.704216 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.713377 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.718087 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.720213 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721067 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/75849ca2-4d93-4c36-98aa-d159d4d03973-machine-approver-tls\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721104 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dd8094fb-e45c-481c-99b5-881758870b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721132 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721207 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721230 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721252 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5k8p\" (UniqueName: \"kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-node-pullsecrets\") pod \"apiserver-76f77b778f-4whfq\" (UID: 
\"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721297 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721317 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtk2n\" (UniqueName: \"kubernetes.io/projected/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-kube-api-access-vtk2n\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721339 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-auth-proxy-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721360 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721391 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-image-import-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721409 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-encryption-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721431 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721468 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-service-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: 
\"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721490 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721512 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8094fb-e45c-481c-99b5-881758870b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721534 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721557 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721577 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8lr9\" (UniqueName: \"kubernetes.io/projected/dd8094fb-e45c-481c-99b5-881758870b4b-kube-api-access-j8lr9\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721609 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-dir\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721633 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns4wt\" (UniqueName: \"kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721654 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-serving-cert\") pod 
\"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721694 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-serving-cert\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721715 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721739 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqqtk\" (UniqueName: \"kubernetes.io/projected/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-kube-api-access-lqqtk\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721764 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721800 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-images\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721875 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-policies\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721898 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721918 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-serving-cert\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721936 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6jgf\" (UniqueName: \"kubernetes.io/projected/30551113-d3e0-4335-910a-433ea706e8e2-kube-api-access-x6jgf\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.721992 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722012 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722031 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722051 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-audit-dir\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722069 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-config\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722099 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r7sv\" (UniqueName: \"kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv\") pod 
\"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722117 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-trusted-ca\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722142 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c57793-db28-4be3-81ee-01570255716c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722162 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-etcd-serving-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722193 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q4mm\" (UniqueName: \"kubernetes.io/projected/49c57793-db28-4be3-81ee-01570255716c-kube-api-access-9q4mm\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722213 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722232 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7zll\" (UniqueName: \"kubernetes.io/projected/75849ca2-4d93-4c36-98aa-d159d4d03973-kube-api-access-n7zll\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722254 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722270 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-serving-cert\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722289 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722307 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-audit\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722329 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-client\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722352 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722394 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-config\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722417 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722435 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-encryption-config\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722455 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/47c87108-afd0-41e4-b1be-8221158b81c4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc 
kubenswrapper[4840]: I1209 16:59:10.722471 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722487 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-serving-cert\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722530 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722551 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs7vr\" (UniqueName: \"kubernetes.io/projected/47c87108-afd0-41e4-b1be-8221158b81c4-kube-api-access-zs7vr\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722571 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-246ks\" (UniqueName: \"kubernetes.io/projected/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-kube-api-access-246ks\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722593 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722611 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722670 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722695 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ljjl\" (UniqueName: \"kubernetes.io/projected/25ed38df-aac2-41d6-a51b-694af6b9cbf3-kube-api-access-5ljjl\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722723 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722750 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722781 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-etcd-client\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722800 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-config\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.722820 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.727653 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.727851 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.732228 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 
16:59:10.732744 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.732874 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.736697 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.755197 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.755386 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.755853 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.756056 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.756226 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.756550 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.756700 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.757061 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.757186 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.757068 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.757432 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.757523 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.759744 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.761239 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.761297 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.764214 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.766499 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.766511 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.766870 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-nw6vb"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.767489 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.768334 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.768706 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.775857 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.776365 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.777079 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.777958 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4whfq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.780552 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfvrz"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.781361 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.787163 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.787934 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.789443 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.790130 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.795451 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.795563 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.796184 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.796898 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.797544 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.798263 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.800098 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.800201 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.801065 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.801305 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.802483 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.802657 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5qngm"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.803920 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhqph"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.804104 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.805095 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.805319 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-g5dvm"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.807005 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-j5vkl"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.810787 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.812256 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.815882 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.817791 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.817825 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.819408 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8x5sv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823619 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-dir\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823676 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns4wt\" (UniqueName: \"kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823707 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823691 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-dir\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823740 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-serving-cert\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.823809 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.825875 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqqtk\" (UniqueName: \"kubernetes.io/projected/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-kube-api-access-lqqtk\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.825932 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/545b98b0-939f-4840-9d10-57ec468b1d62-trusted-ca\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.825998 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826044 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826077 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3649bba-310a-439c-9d29-75c684e6d06a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826103 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkcrr\" (UniqueName: \"kubernetes.io/projected/a3649bba-310a-439c-9d29-75c684e6d06a-kube-api-access-lkcrr\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826151 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-images\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826180 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826225 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-policies\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826262 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826394 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-serving-cert\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826513 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6jgf\" (UniqueName: \"kubernetes.io/projected/30551113-d3e0-4335-910a-433ea706e8e2-kube-api-access-x6jgf\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826609 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf9zk\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-kube-api-access-nf9zk\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826712 4840 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826787 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.826863 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.829710 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.829751 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-audit-dir\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.829810 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-audit-dir\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.829504 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.829509 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.830269 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.830547 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-images\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.830839 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.831387 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-config\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832231 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-config\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832283 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r7sv\" (UniqueName: \"kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832310 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-trusted-ca\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832361 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c57793-db28-4be3-81ee-01570255716c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832387 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-etcd-serving-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.832417 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6a94168-8373-4b69-ada3-934f7eeeb408-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: 
I1209 16:59:10.832864 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.833337 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-trusted-ca\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.834735 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-audit-policies\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835050 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835215 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-serving-cert\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835278 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q4mm\" (UniqueName: \"kubernetes.io/projected/49c57793-db28-4be3-81ee-01570255716c-kube-api-access-9q4mm\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835351 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835400 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835613 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7zll\" (UniqueName: 
\"kubernetes.io/projected/75849ca2-4d93-4c36-98aa-d159d4d03973-kube-api-access-n7zll\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835837 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.835926 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-etcd-serving-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836070 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db36b460-4849-4b66-ad2a-c5f63dab809c-config\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836732 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836777 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-serving-cert\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836804 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836828 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-audit\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836856 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkqql\" (UniqueName: \"kubernetes.io/projected/acf89e2d-7f49-4872-96dd-41d47629998c-kube-api-access-gkqql\") pod \"downloads-7954f5f757-h5ltv\" (UID: \"acf89e2d-7f49-4872-96dd-41d47629998c\") " pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 
16:59:10.836886 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-client\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836911 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/db36b460-4849-4b66-ad2a-c5f63dab809c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836949 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836997 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-config\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837022 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837049 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-encryption-config\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837076 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/47c87108-afd0-41e4-b1be-8221158b81c4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837099 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837123 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837149 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-serving-cert\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837181 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837212 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a3c47983-799f-4700-b733-d040ca0159a3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837239 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3649bba-310a-439c-9d29-75c684e6d06a-proxy-tls\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837271 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs7vr\" (UniqueName: \"kubernetes.io/projected/47c87108-afd0-41e4-b1be-8221158b81c4-kube-api-access-zs7vr\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837298 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-246ks\" (UniqueName: \"kubernetes.io/projected/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-kube-api-access-246ks\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837326 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837350 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert\") pod 
\"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837375 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.836686 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c57793-db28-4be3-81ee-01570255716c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837810 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.837954 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.838707 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.839241 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.839288 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840012 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49c57793-db28-4be3-81ee-01570255716c-config\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840125 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ljjl\" (UniqueName: \"kubernetes.io/projected/25ed38df-aac2-41d6-a51b-694af6b9cbf3-kube-api-access-5ljjl\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840143 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840159 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840191 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840217 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-etcd-client\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840221 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-serving-cert\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840246 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3c47983-799f-4700-b733-d040ca0159a3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840277 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-config\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840587 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840766 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-audit\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.840908 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.841098 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.841225 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/75849ca2-4d93-4c36-98aa-d159d4d03973-machine-approver-tls\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.841313 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dd8094fb-e45c-481c-99b5-881758870b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.841475 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.841727 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842199 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-config\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842526 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: 
I1209 16:59:10.842659 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-etcd-client\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842734 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-etcd-client\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842797 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-serving-cert\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842847 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842876 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842920 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842939 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5k8p\" (UniqueName: \"kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842955 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-node-pullsecrets\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.842996 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-srv-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: 
\"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843046 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkv9n\" (UniqueName: \"kubernetes.io/projected/c6a94168-8373-4b69-ada3-934f7eeeb408-kube-api-access-wkv9n\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843065 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843082 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtk2n\" (UniqueName: \"kubernetes.io/projected/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-kube-api-access-vtk2n\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843122 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-auth-proxy-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843128 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dd8094fb-e45c-481c-99b5-881758870b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843144 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843174 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-image-import-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843192 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/545b98b0-939f-4840-9d10-57ec468b1d62-metrics-tls\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843210 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-encryption-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843228 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843245 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78hlk\" (UniqueName: \"kubernetes.io/projected/64305475-68e4-4261-84eb-f72a300194f6-kube-api-access-78hlk\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843305 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843336 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-bound-sa-token\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843369 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvfws\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-kube-api-access-jvfws\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843425 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-service-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843456 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843367 
4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/30551113-d3e0-4335-910a-433ea706e8e2-node-pullsecrets\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843540 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8094fb-e45c-481c-99b5-881758870b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843572 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db36b460-4849-4b66-ad2a-c5f63dab809c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843597 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843622 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843652 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8lr9\" (UniqueName: \"kubernetes.io/projected/dd8094fb-e45c-481c-99b5-881758870b4b-kube-api-access-j8lr9\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843750 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.843795 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.844428 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.844589 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/75849ca2-4d93-4c36-98aa-d159d4d03973-auth-proxy-config\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.845346 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.845693 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/75849ca2-4d93-4c36-98aa-d159d4d03973-machine-approver-tls\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.846288 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.846663 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.846900 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.846947 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.847201 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.848399 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/30551113-d3e0-4335-910a-433ea706e8e2-image-import-ca\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.848586 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.849041 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-service-ca-bundle\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.849506 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-h5ltv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.849770 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.850694 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8094fb-e45c-481c-99b5-881758870b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.850759 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30551113-d3e0-4335-910a-433ea706e8e2-encryption-config\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.850926 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.852188 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.853294 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-thzxd"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.853430 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/25ed38df-aac2-41d6-a51b-694af6b9cbf3-encryption-config\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.855156 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.855156 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/47c87108-afd0-41e4-b1be-8221158b81c4-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.856148 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.857075 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.858241 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.859426 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.859558 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.861841 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-b82t4"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.862909 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.864192 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.864828 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-serving-cert\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.865303 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.866942 4840 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-machine-config-operator/machine-config-server-8v7mq"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.867562 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.868293 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-wlgg5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.868923 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.869679 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5qngm"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.871054 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-nw6vb"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.872162 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfvrz"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.873243 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.874303 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.874684 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.876178 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.877401 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.878472 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.879926 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.881155 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.882319 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.883458 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tf562"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.884545 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.885727 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wlgg5"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.886701 4840 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.887829 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.888853 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.889949 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-j5vkl"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.891011 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.892167 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhqph"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.893266 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-vvcq2"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.894061 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.894817 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.895607 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vvcq2"] Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.915181 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.936324 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.944906 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6a94168-8373-4b69-ada3-934f7eeeb408-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.944994 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db36b460-4849-4b66-ad2a-c5f63dab809c-config\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945022 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkqql\" (UniqueName: \"kubernetes.io/projected/acf89e2d-7f49-4872-96dd-41d47629998c-kube-api-access-gkqql\") pod \"downloads-7954f5f757-h5ltv\" (UID: \"acf89e2d-7f49-4872-96dd-41d47629998c\") " pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945039 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/db36b460-4849-4b66-ad2a-c5f63dab809c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945061 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a3c47983-799f-4700-b733-d040ca0159a3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945086 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3649bba-310a-439c-9d29-75c684e6d06a-proxy-tls\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945120 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3c47983-799f-4700-b733-d040ca0159a3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945148 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-srv-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945163 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkv9n\" (UniqueName: \"kubernetes.io/projected/c6a94168-8373-4b69-ada3-934f7eeeb408-kube-api-access-wkv9n\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945198 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/545b98b0-939f-4840-9d10-57ec468b1d62-metrics-tls\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945215 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78hlk\" (UniqueName: \"kubernetes.io/projected/64305475-68e4-4261-84eb-f72a300194f6-kube-api-access-78hlk\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945232 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-bound-sa-token\") 
pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945250 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db36b460-4849-4b66-ad2a-c5f63dab809c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945267 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvfws\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-kube-api-access-jvfws\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945299 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/545b98b0-939f-4840-9d10-57ec468b1d62-trusted-ca\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945321 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945339 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3649bba-310a-439c-9d29-75c684e6d06a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945358 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkcrr\" (UniqueName: \"kubernetes.io/projected/a3649bba-310a-439c-9d29-75c684e6d06a-kube-api-access-lkcrr\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945398 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf9zk\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-kube-api-access-nf9zk\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945421 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-bound-sa-token\") pod 
\"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.945738 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db36b460-4849-4b66-ad2a-c5f63dab809c-config\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.946987 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3649bba-310a-439c-9d29-75c684e6d06a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.946999 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3c47983-799f-4700-b733-d040ca0159a3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.950062 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a3c47983-799f-4700-b733-d040ca0159a3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.957791 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/545b98b0-939f-4840-9d10-57ec468b1d62-metrics-tls\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.957853 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db36b460-4849-4b66-ad2a-c5f63dab809c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.958136 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6a94168-8373-4b69-ada3-934f7eeeb408-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.958374 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.982581 4840 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"trusted-ca" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.987163 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/545b98b0-939f-4840-9d10-57ec468b1d62-trusted-ca\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:10 crc kubenswrapper[4840]: I1209 16:59:10.996242 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.015234 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.036568 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.055386 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.074867 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.095308 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.115700 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.135684 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.140149 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-srv-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.156495 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.175644 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.195890 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.200117 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64305475-68e4-4261-84eb-f72a300194f6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.236252 4840 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.256152 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.276272 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.295069 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.300301 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3649bba-310a-439c-9d29-75c684e6d06a-proxy-tls\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.315917 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.336189 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.356929 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.375695 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.396224 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.416331 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.435699 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.455663 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.477329 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.495660 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.515727 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.537234 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.557276 4840 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.576930 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.595948 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.616141 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.636367 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.656038 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.696944 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.716781 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.736514 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.756802 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.776707 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.793489 4840 request.go:700] Waited for 1.011862838s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/secrets?fieldSelector=metadata.name%3Dsigning-key&limit=500&resourceVersion=0 Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.795456 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.816232 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.835638 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.857265 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.876242 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.895361 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 09 16:59:11 crc 
kubenswrapper[4840]: I1209 16:59:11.917066 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.936552 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.956699 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.975875 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 09 16:59:11 crc kubenswrapper[4840]: I1209 16:59:11.997166 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.028359 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.035491 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.055281 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.075663 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.095950 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.116022 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.136515 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.157025 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.175667 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.195944 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.216245 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.235999 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.255676 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.275746 4840 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.295635 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.315745 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.336316 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.356681 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.375776 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.396146 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.415504 4840 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.436748 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.484347 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqqtk\" (UniqueName: \"kubernetes.io/projected/7f21e203-bf50-49e2-9dfe-90606c2e0ff2-kube-api-access-lqqtk\") pod \"openshift-apiserver-operator-796bbdcf4f-krdgz\" (UID: \"7f21e203-bf50-49e2-9dfe-90606c2e0ff2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.493135 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.495812 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns4wt\" (UniqueName: \"kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt\") pod \"oauth-openshift-558db77b4-pr2p8\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") " pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.528558 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r7sv\" (UniqueName: \"kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv\") pod \"console-f9d7485db-z8p7f\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.545010 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6jgf\" (UniqueName: \"kubernetes.io/projected/30551113-d3e0-4335-910a-433ea706e8e2-kube-api-access-x6jgf\") pod \"apiserver-76f77b778f-4whfq\" (UID: \"30551113-d3e0-4335-910a-433ea706e8e2\") " pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.557895 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q4mm\" (UniqueName: \"kubernetes.io/projected/49c57793-db28-4be3-81ee-01570255716c-kube-api-access-9q4mm\") pod \"machine-api-operator-5694c8668f-g5dvm\" (UID: \"49c57793-db28-4be3-81ee-01570255716c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.571646 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7zll\" (UniqueName: \"kubernetes.io/projected/75849ca2-4d93-4c36-98aa-d159d4d03973-kube-api-access-n7zll\") pod \"machine-approver-56656f9798-7l2gj\" (UID: \"75849ca2-4d93-4c36-98aa-d159d4d03973\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.575296 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.587019 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.603761 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-246ks\" (UniqueName: \"kubernetes.io/projected/8f670d36-8ffd-4608-9e4e-6e4e52e5524c-kube-api-access-246ks\") pod \"authentication-operator-69f744f599-thzxd\" (UID: \"8f670d36-8ffd-4608-9e4e-6e4e52e5524c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.623769 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs7vr\" (UniqueName: \"kubernetes.io/projected/47c87108-afd0-41e4-b1be-8221158b81c4-kube-api-access-zs7vr\") pod \"cluster-samples-operator-665b6dd947-g5g2d\" (UID: \"47c87108-afd0-41e4-b1be-8221158b81c4\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.637874 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ljjl\" (UniqueName: \"kubernetes.io/projected/25ed38df-aac2-41d6-a51b-694af6b9cbf3-kube-api-access-5ljjl\") pod \"apiserver-7bbb656c7d-qfszn\" (UID: \"25ed38df-aac2-41d6-a51b-694af6b9cbf3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.659838 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5k8p\" (UniqueName: \"kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p\") pod \"controller-manager-879f6c89f-7p6wc\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.687591 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8lr9\" (UniqueName: \"kubernetes.io/projected/dd8094fb-e45c-481c-99b5-881758870b4b-kube-api-access-j8lr9\") pod \"openshift-config-operator-7777fb866f-b82t4\" (UID: \"dd8094fb-e45c-481c-99b5-881758870b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.692362 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.696048 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtk2n\" (UniqueName: \"kubernetes.io/projected/9dbd355e-ca75-4d1a-8aa7-a19655ae31be-kube-api-access-vtk2n\") pod \"console-operator-58897d9998-8x5sv\" (UID: \"9dbd355e-ca75-4d1a-8aa7-a19655ae31be\") " pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.696549 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.702629 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.716107 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.720123 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.728049 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.734493 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.736368 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.756069 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 09 16:59:12 crc kubenswrapper[4840]: W1209 16:59:12.764774 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75849ca2_4d93_4c36_98aa_d159d4d03973.slice/crio-8e10e68bc3f62f6610af8dd163b89c8b48b92b1fedfc1f89303cfcc3d09bbbbf WatchSource:0}: Error finding container 8e10e68bc3f62f6610af8dd163b89c8b48b92b1fedfc1f89303cfcc3d09bbbbf: Status 404 returned error can't find the container with id 8e10e68bc3f62f6610af8dd163b89c8b48b92b1fedfc1f89303cfcc3d09bbbbf Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.766724 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.772080 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz"] Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.774604 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.789646 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.793914 4840 request.go:700] Waited for 1.924775104s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.795404 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.804262 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.811570 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.815314 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 09 16:59:12 crc kubenswrapper[4840]: W1209 16:59:12.831916 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c88493e_2461_4e30_b7c9_803beb3fec3b.slice/crio-e5fb0cad3edba8f11e30c7736ba7f8ccced9f3bb82548b7763e6b886c7591988 WatchSource:0}: Error finding container e5fb0cad3edba8f11e30c7736ba7f8ccced9f3bb82548b7763e6b886c7591988: Status 404 returned error can't find the container with id e5fb0cad3edba8f11e30c7736ba7f8ccced9f3bb82548b7763e6b886c7591988 Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.833219 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.835177 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.856905 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.881432 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.920124 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.944091 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkqql\" (UniqueName: \"kubernetes.io/projected/acf89e2d-7f49-4872-96dd-41d47629998c-kube-api-access-gkqql\") pod \"downloads-7954f5f757-h5ltv\" (UID: \"acf89e2d-7f49-4872-96dd-41d47629998c\") " pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.952020 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/db36b460-4849-4b66-ad2a-c5f63dab809c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p4bwb\" (UID: \"db36b460-4849-4b66-ad2a-c5f63dab809c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.971027 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-bound-sa-token\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.975713 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-apiserver/apiserver-76f77b778f-4whfq"] Dec 09 16:59:12 crc kubenswrapper[4840]: I1209 16:59:12.992531 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkv9n\" (UniqueName: \"kubernetes.io/projected/c6a94168-8373-4b69-ada3-934f7eeeb408-kube-api-access-wkv9n\") pod \"multus-admission-controller-857f4d67dd-tf562\" (UID: \"c6a94168-8373-4b69-ada3-934f7eeeb408\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:12 crc kubenswrapper[4840]: W1209 16:59:12.993522 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30551113_d3e0_4335_910a_433ea706e8e2.slice/crio-a3b58dea173cf39a8c387a281346015ce224429d029d9c1f4e62e0406fe1f53e WatchSource:0}: Error finding container a3b58dea173cf39a8c387a281346015ce224429d029d9c1f4e62e0406fe1f53e: Status 404 returned error can't find the container with id a3b58dea173cf39a8c387a281346015ce224429d029d9c1f4e62e0406fe1f53e Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.010913 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78hlk\" (UniqueName: \"kubernetes.io/projected/64305475-68e4-4261-84eb-f72a300194f6-kube-api-access-78hlk\") pod \"olm-operator-6b444d44fb-zp8rr\" (UID: \"64305475-68e4-4261-84eb-f72a300194f6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.033495 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvfws\" (UniqueName: \"kubernetes.io/projected/545b98b0-939f-4840-9d10-57ec468b1d62-kube-api-access-jvfws\") pod \"ingress-operator-5b745b69d9-phbcj\" (UID: \"545b98b0-939f-4840-9d10-57ec468b1d62\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.050718 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf9zk\" (UniqueName: \"kubernetes.io/projected/a3c47983-799f-4700-b733-d040ca0159a3-kube-api-access-nf9zk\") pod \"cluster-image-registry-operator-dc59b4c8b-vv579\" (UID: \"a3c47983-799f-4700-b733-d040ca0159a3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.055663 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.068095 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkcrr\" (UniqueName: \"kubernetes.io/projected/a3649bba-310a-439c-9d29-75c684e6d06a-kube-api-access-lkcrr\") pod \"machine-config-controller-84d6567774-bxntg\" (UID: \"a3649bba-310a-439c-9d29-75c684e6d06a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.128556 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8x5sv"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.166010 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.166444 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-b82t4"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184645 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184699 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-service-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184725 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg42z\" (UniqueName: \"kubernetes.io/projected/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-kube-api-access-tg42z\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184902 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d00d8a0-9935-4564-95e9-022e9698358b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184923 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184980 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-etcd-client\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.184998 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-metrics-certs\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185013 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" 
(UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185051 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185069 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-stats-auth\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185086 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d00d8a0-9935-4564-95e9-022e9698358b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185113 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-config\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185139 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38eb149d-5a2c-49e2-9c8b-50e0c720a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185153 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185168 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d00d8a0-9935-4564-95e9-022e9698358b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185186 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6wzk\" (UniqueName: 
\"kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185210 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38eb149d-5a2c-49e2-9c8b-50e0c720a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185224 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185241 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-default-certificate\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185257 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185307 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185324 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-serving-cert\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185350 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185376 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185403 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j886w\" (UniqueName: \"kubernetes.io/projected/38eb149d-5a2c-49e2-9c8b-50e0c720a598-kube-api-access-j886w\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185428 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq6v6\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185452 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pphj9\" (UniqueName: \"kubernetes.io/projected/f75c37d0-2465-4202-9cf4-981ee305fe89-kube-api-access-pphj9\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185521 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jck4v\" (UniqueName: \"kubernetes.io/projected/1aa9c070-6503-4910-939a-1cb223568209-kube-api-access-jck4v\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185551 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.185572 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-config\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc 
kubenswrapper[4840]: I1209 16:59:13.185613 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.186582 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f75c37d0-2465-4202-9cf4-981ee305fe89-service-ca-bundle\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.186641 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.187057 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:13.687043881 +0000 UTC m=+139.678154514 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: W1209 16:59:13.199514 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd8094fb_e45c_481c_99b5_881758870b4b.slice/crio-cac7651fcc71281fe101f4fb83fc35313c26ae5ca46fe990b2c671d6d7c96790 WatchSource:0}: Error finding container cac7651fcc71281fe101f4fb83fc35313c26ae5ca46fe990b2c671d6d7c96790: Status 404 returned error can't find the container with id cac7651fcc71281fe101f4fb83fc35313c26ae5ca46fe990b2c671d6d7c96790 Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.200512 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.201804 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.204231 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.208484 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" Dec 09 16:59:13 crc kubenswrapper[4840]: W1209 16:59:13.216067 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40f11448_6267_4747_9954_da5b290bcef6.slice/crio-e8b38d18bb2de70b0853a8e341e3c0ddc0877e27a2f37c7acd51d61deb0d4846 WatchSource:0}: Error finding container e8b38d18bb2de70b0853a8e341e3c0ddc0877e27a2f37c7acd51d61deb0d4846: Status 404 returned error can't find the container with id e8b38d18bb2de70b0853a8e341e3c0ddc0877e27a2f37c7acd51d61deb0d4846 Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.227251 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289139 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.289386 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:13.789368405 +0000 UTC m=+139.780479038 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289307 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289663 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-csi-data-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289703 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289727 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-registration-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " 
pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289750 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km5mj\" (UniqueName: \"kubernetes.io/projected/64b4f46c-b8bf-469f-8288-701d04a35911-kube-api-access-km5mj\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289827 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f75c37d0-2465-4202-9cf4-981ee305fe89-service-ca-bundle\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289848 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-key\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289870 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-images\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289901 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/15620c93-7aea-4b1f-9073-af5d15bc231d-cert\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289922 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc781ca-e849-4d81-a786-bb095749564e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.289945 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-profile-collector-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290009 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 
16:59:13.290073 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt55k\" (UniqueName: \"kubernetes.io/projected/4bb6a7d5-bada-4467-90de-dfa4e2490733-kube-api-access-dt55k\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290095 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/229bd7db-2f71-4bbf-baf1-b41a4368b271-metrics-tls\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290117 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290140 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-service-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290176 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg42z\" (UniqueName: \"kubernetes.io/projected/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-kube-api-access-tg42z\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290200 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d00d8a0-9935-4564-95e9-022e9698358b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290222 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-apiservice-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290246 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290286 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-etcd-client\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290308 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t22b8\" (UniqueName: \"kubernetes.io/projected/612b0dd9-1b93-4d09-818c-3a139b6f31aa-kube-api-access-t22b8\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290331 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290359 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-serving-cert\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290380 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-mountpoint-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290402 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x562z\" (UniqueName: \"kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290424 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-node-bootstrap-token\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290446 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-metrics-certs\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290468 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates\") pod \"image-registry-697d97f7c8-bczl5\" (UID: 
\"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.290492 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.296640 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-metrics-certs\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.296860 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-stats-auth\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.296947 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.299245 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d00d8a0-9935-4564-95e9-022e9698358b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.299281 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52mhv\" (UniqueName: \"kubernetes.io/projected/706629a8-1b29-4b41-9ab6-15a30dd32bec-kube-api-access-52mhv\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.299313 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-config\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.299917 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmx68\" (UniqueName: \"kubernetes.io/projected/cbed776b-da99-4e71-9128-f5bbb8eeb100-kube-api-access-gmx68\") pod \"migrator-59844c95c7-crvj6\" (UID: \"cbed776b-da99-4e71-9128-f5bbb8eeb100\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.300484 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-config\") pod \"etcd-operator-b45778765-nw6vb\" 
(UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.300549 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5kvh\" (UniqueName: \"kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.300581 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-webhook-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301267 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.300938 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301109 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301375 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38eb149d-5a2c-49e2-9c8b-50e0c720a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301576 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301609 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d00d8a0-9935-4564-95e9-022e9698358b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301662 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/706629a8-1b29-4b41-9ab6-15a30dd32bec-tmpfs\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301685 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lcvm\" (UniqueName: \"kubernetes.io/projected/2cc781ca-e849-4d81-a786-bb095749564e-kube-api-access-5lcvm\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301732 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.301776 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bb6a7d5-bada-4467-90de-dfa4e2490733-config-volume\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.302942 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f75c37d0-2465-4202-9cf4-981ee305fe89-service-ca-bundle\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.304949 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.305535 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-service-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.305557 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-etcd-client\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.306580 4840 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-etcd-ca\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.306593 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.307247 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-stats-auth\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.307944 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38eb149d-5a2c-49e2-9c8b-50e0c720a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308214 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308271 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d00d8a0-9935-4564-95e9-022e9698358b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308445 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6wzk\" (UniqueName: \"kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308631 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-socket-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308832 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa9c070-6503-4910-939a-1cb223568209-config\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308873 4840 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38eb149d-5a2c-49e2-9c8b-50e0c720a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.308891 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9jqg\" (UniqueName: \"kubernetes.io/projected/229bd7db-2f71-4bbf-baf1-b41a4368b271-kube-api-access-x9jqg\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.309509 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38eb149d-5a2c-49e2-9c8b-50e0c720a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.309548 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310138 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310166 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-default-certificate\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310218 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310529 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-cabundle\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310634 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310562 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.310843 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311182 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311182 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311251 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jzx5\" (UniqueName: \"kubernetes.io/projected/9911858a-e920-426e-ae41-c97fb62b70c9-kube-api-access-7jzx5\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-srv-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311291 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc781ca-e849-4d81-a786-bb095749564e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311311 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311331 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bb6a7d5-bada-4467-90de-dfa4e2490733-metrics-tls\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311364 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-serving-cert\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311401 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311420 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6292462-00c0-4a89-84a6-88fe8e6fdfca-proxy-tls\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311461 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311480 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311500 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j886w\" (UniqueName: \"kubernetes.io/projected/38eb149d-5a2c-49e2-9c8b-50e0c720a598-kube-api-access-j886w\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311529 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq6v6\" (UniqueName: 
\"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311545 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8kgr\" (UniqueName: \"kubernetes.io/projected/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-kube-api-access-s8kgr\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311567 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8csfr\" (UniqueName: \"kubernetes.io/projected/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-kube-api-access-8csfr\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311581 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8kjf\" (UniqueName: \"kubernetes.io/projected/15620c93-7aea-4b1f-9073-af5d15bc231d-kube-api-access-g8kjf\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311610 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pphj9\" (UniqueName: \"kubernetes.io/projected/f75c37d0-2465-4202-9cf4-981ee305fe89-kube-api-access-pphj9\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311643 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-plugins-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311657 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311686 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxqtg\" (UniqueName: \"kubernetes.io/projected/bbe4e537-04ac-420c-9c31-ae521a1e8de2-kube-api-access-wxqtg\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311706 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jck4v\" (UniqueName: 
\"kubernetes.io/projected/1aa9c070-6503-4910-939a-1cb223568209-kube-api-access-jck4v\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311742 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311758 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrhpv\" (UniqueName: \"kubernetes.io/projected/b6292462-00c0-4a89-84a6-88fe8e6fdfca-kube-api-access-mrhpv\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311773 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-certs\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.311817 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-config\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.312605 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d00d8a0-9935-4564-95e9-022e9698358b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.313651 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f75c37d0-2465-4202-9cf4-981ee305fe89-default-certificate\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.314594 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:13.814581523 +0000 UTC m=+139.805692156 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.315599 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.315650 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-config\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.318695 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-thzxd"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.319140 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-g5dvm"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.325450 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa9c070-6503-4910-939a-1cb223568209-serving-cert\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.327615 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.336177 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.349328 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg42z\" (UniqueName: \"kubernetes.io/projected/0978ab4b-fdc1-46ac-94e2-ead3135e1ceb-kube-api-access-tg42z\") pod \"control-plane-machine-set-operator-78cbb6b69f-mtqnx\" (UID: \"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.357466 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" event={"ID":"75849ca2-4d93-4c36-98aa-d159d4d03973","Type":"ContainerStarted","Data":"030cb7f0db7cf616f769a2130324a4fdb26a0fc7f6bb3c617668fddb60715d28"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.357519 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" event={"ID":"75849ca2-4d93-4c36-98aa-d159d4d03973","Type":"ContainerStarted","Data":"8e10e68bc3f62f6610af8dd163b89c8b48b92b1fedfc1f89303cfcc3d09bbbbf"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.357828 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d00d8a0-9935-4564-95e9-022e9698358b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-b9k6h\" (UID: \"7d00d8a0-9935-4564-95e9-022e9698358b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.362522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" event={"ID":"40f11448-6267-4747-9954-da5b290bcef6","Type":"ContainerStarted","Data":"e8b38d18bb2de70b0853a8e341e3c0ddc0877e27a2f37c7acd51d61deb0d4846"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.384709 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z8p7f" event={"ID":"2c88493e-2461-4e30-b7c9-803beb3fec3b","Type":"ContainerStarted","Data":"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.384748 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z8p7f" event={"ID":"2c88493e-2461-4e30-b7c9-803beb3fec3b","Type":"ContainerStarted","Data":"e5fb0cad3edba8f11e30c7736ba7f8ccced9f3bb82548b7763e6b886c7591988"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.389555 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" event={"ID":"63ab647f-02b8-49c7-8f8b-622a0fcd73bf","Type":"ContainerStarted","Data":"a26154a00eb30705931a9ebbcf3a3f4eae2dadd43829cef4f696685eadd6b4b9"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.390575 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.399190 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console-operator/console-operator-58897d9998-8x5sv" event={"ID":"9dbd355e-ca75-4d1a-8aa7-a19655ae31be","Type":"ContainerStarted","Data":"9685c1e7149ffa1d6bff15794013a9317d7e83f315eeb5a2a46ccbd3fe4e8f1d"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.399433 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" event={"ID":"9dbd355e-ca75-4d1a-8aa7-a19655ae31be","Type":"ContainerStarted","Data":"3613cb37f18af0a0a37bca1b031e52ca7ec83175141d7c3dae477810ae163e90"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.399813 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.402906 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" event={"ID":"25ed38df-aac2-41d6-a51b-694af6b9cbf3","Type":"ContainerStarted","Data":"deec3d99f39d05cc68720612825c12f00b12cfd265f2cb6f5168b106966b372f"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412276 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412412 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8kgr\" (UniqueName: \"kubernetes.io/projected/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-kube-api-access-s8kgr\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412452 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8csfr\" (UniqueName: \"kubernetes.io/projected/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-kube-api-access-8csfr\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412479 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8kjf\" (UniqueName: \"kubernetes.io/projected/15620c93-7aea-4b1f-9073-af5d15bc231d-kube-api-access-g8kjf\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.412522 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:13.912494993 +0000 UTC m=+139.903605706 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412562 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-plugins-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412605 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412644 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxqtg\" (UniqueName: \"kubernetes.io/projected/bbe4e537-04ac-420c-9c31-ae521a1e8de2-kube-api-access-wxqtg\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412699 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412726 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrhpv\" (UniqueName: \"kubernetes.io/projected/b6292462-00c0-4a89-84a6-88fe8e6fdfca-kube-api-access-mrhpv\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-certs\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412779 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-csi-data-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412833 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-registration-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412857 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km5mj\" (UniqueName: \"kubernetes.io/projected/64b4f46c-b8bf-469f-8288-701d04a35911-kube-api-access-km5mj\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412886 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-key\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412910 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-images\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.412942 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/15620c93-7aea-4b1f-9073-af5d15bc231d-cert\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413000 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-plugins-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413048 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc781ca-e849-4d81-a786-bb095749564e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413080 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-profile-collector-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413117 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt55k\" (UniqueName: \"kubernetes.io/projected/4bb6a7d5-bada-4467-90de-dfa4e2490733-kube-api-access-dt55k\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 
16:59:13.413141 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/229bd7db-2f71-4bbf-baf1-b41a4368b271-metrics-tls\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413169 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-apiservice-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413203 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t22b8\" (UniqueName: \"kubernetes.io/projected/612b0dd9-1b93-4d09-818c-3a139b6f31aa-kube-api-access-t22b8\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413229 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413254 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-serving-cert\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413275 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-mountpoint-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413297 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x562z\" (UniqueName: \"kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413318 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-node-bootstrap-token\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413347 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52mhv\" (UniqueName: \"kubernetes.io/projected/706629a8-1b29-4b41-9ab6-15a30dd32bec-kube-api-access-52mhv\") pod 
\"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413370 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-config\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413392 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmx68\" (UniqueName: \"kubernetes.io/projected/cbed776b-da99-4e71-9128-f5bbb8eeb100-kube-api-access-gmx68\") pod \"migrator-59844c95c7-crvj6\" (UID: \"cbed776b-da99-4e71-9128-f5bbb8eeb100\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413422 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5kvh\" (UniqueName: \"kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413448 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-webhook-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413473 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413500 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/706629a8-1b29-4b41-9ab6-15a30dd32bec-tmpfs\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413523 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lcvm\" (UniqueName: \"kubernetes.io/projected/2cc781ca-e849-4d81-a786-bb095749564e-kube-api-access-5lcvm\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413545 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: 
\"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413569 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bb6a7d5-bada-4467-90de-dfa4e2490733-config-volume\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413599 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-socket-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413638 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9jqg\" (UniqueName: \"kubernetes.io/projected/229bd7db-2f71-4bbf-baf1-b41a4368b271-kube-api-access-x9jqg\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413669 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-cabundle\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413687 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jzx5\" (UniqueName: \"kubernetes.io/projected/9911858a-e920-426e-ae41-c97fb62b70c9-kube-api-access-7jzx5\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413728 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413754 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-srv-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413780 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2cc781ca-e849-4d81-a786-bb095749564e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413806 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bb6a7d5-bada-4467-90de-dfa4e2490733-metrics-tls\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413845 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.413871 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6292462-00c0-4a89-84a6-88fe8e6fdfca-proxy-tls\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.417443 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" event={"ID":"7f21e203-bf50-49e2-9dfe-90606c2e0ff2","Type":"ContainerStarted","Data":"41ad99906adb0b3edf702836ac752e9cfef0c8baa4482633c3fe4a9996a985c5"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.417486 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" event={"ID":"7f21e203-bf50-49e2-9dfe-90606c2e0ff2","Type":"ContainerStarted","Data":"9cd93f6ca5482565e39c39352ef2b5c6f9c35d3c3784d3dd08ce86d56fedf2e3"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.417924 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6292462-00c0-4a89-84a6-88fe8e6fdfca-proxy-tls\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418107 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-csi-data-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418159 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-registration-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418278 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-socket-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418788 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bb6a7d5-bada-4467-90de-dfa4e2490733-config-volume\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418899 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-config\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.418998 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-cabundle\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.419501 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.420174 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-certs\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.420247 4840 patch_prober.go:28] interesting pod/console-operator-58897d9998-8x5sv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.420280 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" podUID="9dbd355e-ca75-4d1a-8aa7-a19655ae31be" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.424913 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-srv-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.425033 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/706629a8-1b29-4b41-9ab6-15a30dd32bec-tmpfs\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.425037 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/15620c93-7aea-4b1f-9073-af5d15bc231d-cert\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.425401 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-webhook-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.425659 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cc781ca-e849-4d81-a786-bb095749564e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.425850 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/612b0dd9-1b93-4d09-818c-3a139b6f31aa-signing-key\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.426619 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427058 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cc781ca-e849-4d81-a786-bb095749564e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427500 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6wzk\" (UniqueName: \"kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk\") pod \"route-controller-manager-6576b87f9c-66mgq\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427749 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/706629a8-1b29-4b41-9ab6-15a30dd32bec-apiservice-cert\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427844 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b6292462-00c0-4a89-84a6-88fe8e6fdfca-images\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427880 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" event={"ID":"dd8094fb-e45c-481c-99b5-881758870b4b","Type":"ContainerStarted","Data":"cac7651fcc71281fe101f4fb83fc35313c26ae5ca46fe990b2c671d6d7c96790"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.427998 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9911858a-e920-426e-ae41-c97fb62b70c9-mountpoint-dir\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.428706 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.428820 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:13.928804289 +0000 UTC m=+139.919914982 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.428883 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/229bd7db-2f71-4bbf-baf1-b41a4368b271-metrics-tls\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.429156 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.430893 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bb6a7d5-bada-4467-90de-dfa4e2490733-metrics-tls\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.432146 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbe4e537-04ac-420c-9c31-ae521a1e8de2-node-bootstrap-token\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.432353 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/64b4f46c-b8bf-469f-8288-701d04a35911-profile-collector-cert\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.432985 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" event={"ID":"30551113-d3e0-4335-910a-433ea706e8e2","Type":"ContainerStarted","Data":"a3b58dea173cf39a8c387a281346015ce224429d029d9c1f4e62e0406fe1f53e"} Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.434919 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-serving-cert\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.436092 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.446275 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j886w\" (UniqueName: \"kubernetes.io/projected/38eb149d-5a2c-49e2-9c8b-50e0c720a598-kube-api-access-j886w\") pod \"openshift-controller-manager-operator-756b6f6bc6-9vm9j\" (UID: \"38eb149d-5a2c-49e2-9c8b-50e0c720a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.460201 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq6v6\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.476721 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jck4v\" (UniqueName: \"kubernetes.io/projected/1aa9c070-6503-4910-939a-1cb223568209-kube-api-access-jck4v\") pod \"etcd-operator-b45778765-nw6vb\" (UID: \"1aa9c070-6503-4910-939a-1cb223568209\") " pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.491197 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.494416 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pphj9\" (UniqueName: \"kubernetes.io/projected/f75c37d0-2465-4202-9cf4-981ee305fe89-kube-api-access-pphj9\") pod \"router-default-5444994796-ngfrq\" (UID: \"f75c37d0-2465-4202-9cf4-981ee305fe89\") " pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.515115 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.517060 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.017041695 +0000 UTC m=+140.008152328 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.519036 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.529276 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1cdfeed7-7f12-43d3-aee8-3a14ab37eac7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-k4vwg\" (UID: \"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.583324 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8kjf\" (UniqueName: \"kubernetes.io/projected/15620c93-7aea-4b1f-9073-af5d15bc231d-kube-api-access-g8kjf\") pod \"ingress-canary-wlgg5\" (UID: \"15620c93-7aea-4b1f-9073-af5d15bc231d\") " pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.591040 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-h5ltv"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.599593 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.610182 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8kgr\" (UniqueName: \"kubernetes.io/projected/a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8-kube-api-access-s8kgr\") pod \"package-server-manager-789f6589d5-9zr2s\" (UID: \"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.612335 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.617692 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.618107 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.118095231 +0000 UTC m=+140.109205864 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.619750 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.623508 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxqtg\" (UniqueName: \"kubernetes.io/projected/bbe4e537-04ac-420c-9c31-ae521a1e8de2-kube-api-access-wxqtg\") pod \"machine-config-server-8v7mq\" (UID: \"bbe4e537-04ac-420c-9c31-ae521a1e8de2\") " pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.631462 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x562z\" (UniqueName: \"kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z\") pod \"collect-profiles-29421645-m9lrc\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.643115 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.652032 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.657167 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8csfr\" (UniqueName: \"kubernetes.io/projected/3cc0c386-8560-4984-aba9-a2c7f7eb6c2a-kube-api-access-8csfr\") pod \"service-ca-operator-777779d784-jhqph\" (UID: \"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.665308 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.667872 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrhpv\" (UniqueName: \"kubernetes.io/projected/b6292462-00c0-4a89-84a6-88fe8e6fdfca-kube-api-access-mrhpv\") pod \"machine-config-operator-74547568cd-2x6tv\" (UID: \"b6292462-00c0-4a89-84a6-88fe8e6fdfca\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.675814 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.685822 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km5mj\" (UniqueName: \"kubernetes.io/projected/64b4f46c-b8bf-469f-8288-701d04a35911-kube-api-access-km5mj\") pod \"catalog-operator-68c6474976-f547s\" (UID: \"64b4f46c-b8bf-469f-8288-701d04a35911\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.704986 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52mhv\" (UniqueName: \"kubernetes.io/projected/706629a8-1b29-4b41-9ab6-15a30dd32bec-kube-api-access-52mhv\") pod \"packageserver-d55dfcdfc-s9lbf\" (UID: \"706629a8-1b29-4b41-9ab6-15a30dd32bec\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.705268 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.718290 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.718777 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.218763344 +0000 UTC m=+140.209873977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.720738 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.723920 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9jqg\" (UniqueName: \"kubernetes.io/projected/229bd7db-2f71-4bbf-baf1-b41a4368b271-kube-api-access-x9jqg\") pod \"dns-operator-744455d44c-5qngm\" (UID: \"229bd7db-2f71-4bbf-baf1-b41a4368b271\") " pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.726414 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.734651 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.744325 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.751327 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.752656 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.753866 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmx68\" (UniqueName: \"kubernetes.io/projected/cbed776b-da99-4e71-9128-f5bbb8eeb100-kube-api-access-gmx68\") pod \"migrator-59844c95c7-crvj6\" (UID: \"cbed776b-da99-4e71-9128-f5bbb8eeb100\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.757615 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.758699 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5kvh\" (UniqueName: \"kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh\") pod \"marketplace-operator-79b997595-2ckb5\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.760360 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.768894 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jzx5\" (UniqueName: \"kubernetes.io/projected/9911858a-e920-426e-ae41-c97fb62b70c9-kube-api-access-7jzx5\") pod \"csi-hostpathplugin-j5vkl\" (UID: \"9911858a-e920-426e-ae41-c97fb62b70c9\") " pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: W1209 16:59:13.779819 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod545b98b0_939f_4840_9d10_57ec468b1d62.slice/crio-ce820a2c34c570213579f8d89b4f2d8c3ac891db76cf4fc9cdaa401906fa8d6e WatchSource:0}: Error finding container ce820a2c34c570213579f8d89b4f2d8c3ac891db76cf4fc9cdaa401906fa8d6e: Status 404 returned error can't find the container with id ce820a2c34c570213579f8d89b4f2d8c3ac891db76cf4fc9cdaa401906fa8d6e Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.783570 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.786700 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tf562"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.788667 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-8v7mq" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.791828 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt55k\" (UniqueName: \"kubernetes.io/projected/4bb6a7d5-bada-4467-90de-dfa4e2490733-kube-api-access-dt55k\") pod \"dns-default-vvcq2\" (UID: \"4bb6a7d5-bada-4467-90de-dfa4e2490733\") " pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.795356 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wlgg5" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.803702 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.803916 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.808454 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lcvm\" (UniqueName: \"kubernetes.io/projected/2cc781ca-e849-4d81-a786-bb095749564e-kube-api-access-5lcvm\") pod \"kube-storage-version-migrator-operator-b67b599dd-njt76\" (UID: \"2cc781ca-e849-4d81-a786-bb095749564e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.820072 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.820396 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.320380607 +0000 UTC m=+140.311491240 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.835404 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t22b8\" (UniqueName: \"kubernetes.io/projected/612b0dd9-1b93-4d09-818c-3a139b6f31aa-kube-api-access-t22b8\") pod \"service-ca-9c57cc56f-zfvrz\" (UID: \"612b0dd9-1b93-4d09-818c-3a139b6f31aa\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: W1209 16:59:13.862303 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3649bba_310a_439c_9d29_75c684e6d06a.slice/crio-dcc327d3531b97073cd7bc8e34dd9e735705453617acc26006af5659db6c6f20 WatchSource:0}: Error finding container dcc327d3531b97073cd7bc8e34dd9e735705453617acc26006af5659db6c6f20: Status 404 returned error can't find the container with id dcc327d3531b97073cd7bc8e34dd9e735705453617acc26006af5659db6c6f20 Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.887794 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.921344 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.921565 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.421539016 +0000 UTC m=+140.412649649 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.921629 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:13 crc kubenswrapper[4840]: E1209 16:59:13.922077 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-09 16:59:14.422062732 +0000 UTC m=+140.413173365 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.983111 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.990183 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.992909 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h"] Dec 09 16:59:13 crc kubenswrapper[4840]: I1209 16:59:13.998510 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.012183 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.022688 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.022811 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.522794498 +0000 UTC m=+140.513905131 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.022865 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.023186 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.523176799 +0000 UTC m=+140.514287432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:14 crc kubenswrapper[4840]: W1209 16:59:14.102464 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod185aeb05_e73d_4ece_a947_8163702dd545.slice/crio-3352fceb67167fc7f2e2b1868f024f5aac35505235456264ad53150dbd01aa03 WatchSource:0}: Error finding container 3352fceb67167fc7f2e2b1868f024f5aac35505235456264ad53150dbd01aa03: Status 404 returned error can't find the container with id 3352fceb67167fc7f2e2b1868f024f5aac35505235456264ad53150dbd01aa03 Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.106704 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf"] Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.125822 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.126156 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.626127373 +0000 UTC m=+140.617238006 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.126272 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.128062 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.628040141 +0000 UTC m=+140.619150764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.170993 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krdgz" podStartSLOduration=117.170948937 podStartE2EDuration="1m57.170948937s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:14.169911396 +0000 UTC m=+140.161022029" watchObservedRunningTime="2025-12-09 16:59:14.170948937 +0000 UTC m=+140.162059570" Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.227313 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.227818 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.727798348 +0000 UTC m=+140.718908981 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.255161 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.258886 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.260336 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-nw6vb"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.310609 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.314711 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.330113 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.330444 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.830430321 +0000 UTC m=+140.821540954 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.387377 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5qngm"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.393919 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv"]
Dec 09 16:59:14 crc kubenswrapper[4840]: W1209 16:59:14.396789 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64b4f46c_b8bf_469f_8288_701d04a35911.slice/crio-7508407ef207e4097517682a87e34441a353565d8614db1a53c4cde00fccce7b WatchSource:0}: Error finding container 7508407ef207e4097517682a87e34441a353565d8614db1a53c4cde00fccce7b: Status 404 returned error can't find the container with id 7508407ef207e4097517682a87e34441a353565d8614db1a53c4cde00fccce7b
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.432875 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.433300 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:14.933280452 +0000 UTC m=+140.924391085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.483930 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" event={"ID":"47c87108-afd0-41e4-b1be-8221158b81c4","Type":"ContainerStarted","Data":"952642c2c36fddff85c6f94f248cb20ce647cecb69db9fa31b782fc367b56dd3"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.483989 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" event={"ID":"47c87108-afd0-41e4-b1be-8221158b81c4","Type":"ContainerStarted","Data":"8b4647abfdc3f03de98240088d2fab5dd02bbfb76d13e4909ffe9c9b15236842"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.491181 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" event={"ID":"545b98b0-939f-4840-9d10-57ec468b1d62","Type":"ContainerStarted","Data":"ce820a2c34c570213579f8d89b4f2d8c3ac891db76cf4fc9cdaa401906fa8d6e"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.497738 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" event={"ID":"49c57793-db28-4be3-81ee-01570255716c","Type":"ContainerStarted","Data":"5a5891c048e8f5c0dc982b35453cad1e2e9ada9737cd1be8c642b4609d372d8f"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.497776 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" event={"ID":"49c57793-db28-4be3-81ee-01570255716c","Type":"ContainerStarted","Data":"b3d531cd3e23bec98e7c8d8332f96e5f02950d08a8e02f1550927e203a24b661"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.506759 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhqph"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.520454 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" event={"ID":"40f11448-6267-4747-9954-da5b290bcef6","Type":"ContainerStarted","Data":"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.520500 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.522302 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" event={"ID":"64b4f46c-b8bf-469f-8288-701d04a35911","Type":"ContainerStarted","Data":"7508407ef207e4097517682a87e34441a353565d8614db1a53c4cde00fccce7b"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.534528 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.535056 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.035037419 +0000 UTC m=+141.026148052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.548465 4840 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7p6wc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.548559 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" podUID="40f11448-6267-4747-9954-da5b290bcef6" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.599741 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wlgg5"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.607735 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" event={"ID":"63ab647f-02b8-49c7-8f8b-622a0fcd73bf","Type":"ContainerStarted","Data":"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.626753 4840 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-pr2p8 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body=
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.633040 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.635986 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.637797 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.137774066 +0000 UTC m=+141.128884699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.653057 4840 generic.go:334] "Generic (PLEG): container finished" podID="25ed38df-aac2-41d6-a51b-694af6b9cbf3" containerID="a53eacaa2ff9ab30c92c56027724ffd1630bb2222e2df556c201c1a0668a02c6" exitCode=0
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.684207 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.684261 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.685718 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" event={"ID":"25ed38df-aac2-41d6-a51b-694af6b9cbf3","Type":"ContainerDied","Data":"a53eacaa2ff9ab30c92c56027724ffd1630bb2222e2df556c201c1a0668a02c6"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.685756 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-8v7mq" event={"ID":"bbe4e537-04ac-420c-9c31-ae521a1e8de2","Type":"ContainerStarted","Data":"a6b7a048cbdd147d42282f0176ec86538912df9e4df23b7534194d22458f45aa"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.700115 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.710313 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" event={"ID":"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7","Type":"ContainerStarted","Data":"fa5e614566a666f82005d7e970486139ad33fb005277d34509da7b7036448b5d"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.716255 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.737905 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.740413 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.2403991 +0000 UTC m=+141.231509733 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.759195 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" event={"ID":"75849ca2-4d93-4c36-98aa-d159d4d03973","Type":"ContainerStarted","Data":"46f22d31168637e2a6c0646912a6151f37ade9ea889ee5d49d7ffab6234669ce"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.765894 4840 generic.go:334] "Generic (PLEG): container finished" podID="30551113-d3e0-4335-910a-433ea706e8e2" containerID="b747287c81d926eae09d5485182b0869746cfc92a1ac7b1004983b842aefd004" exitCode=0
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.765996 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" event={"ID":"30551113-d3e0-4335-910a-433ea706e8e2","Type":"ContainerDied","Data":"b747287c81d926eae09d5485182b0869746cfc92a1ac7b1004983b842aefd004"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.771513 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ngfrq" event={"ID":"f75c37d0-2465-4202-9cf4-981ee305fe89","Type":"ContainerStarted","Data":"76e9e316ba3fb6ff2ba2d9d45b695b94dfbe844bb282d6dc3e711139c8b461b7"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.775460 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" event={"ID":"db36b460-4849-4b66-ad2a-c5f63dab809c","Type":"ContainerStarted","Data":"cde1fa7cf428dc7eec42512a809785fef6c9c5564de1b027bd3552c08b5658ea"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.779205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" event={"ID":"185aeb05-e73d-4ece-a947-8163702dd545","Type":"ContainerStarted","Data":"3352fceb67167fc7f2e2b1868f024f5aac35505235456264ad53150dbd01aa03"}
Dec 09 16:59:14 crc kubenswrapper[4840]: W1209 16:59:14.784073 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2cc781ca_e849_4d81_a786_bb095749564e.slice/crio-d04b9d28a6a1749b729664a947b3bccd37987c4b2de91cd8e5160df385c3459f WatchSource:0}: Error finding container d04b9d28a6a1749b729664a947b3bccd37987c4b2de91cd8e5160df385c3459f: Status 404 returned error can't find the container with id d04b9d28a6a1749b729664a947b3bccd37987c4b2de91cd8e5160df385c3459f
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.792514 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"]
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.802205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" event={"ID":"a3c47983-799f-4700-b733-d040ca0159a3","Type":"ContainerStarted","Data":"2dd7cc814c070d12192d430d46c7523f251f231ad1b5ae59fd4fb74ee2e6e29d"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.802273 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" event={"ID":"a3c47983-799f-4700-b733-d040ca0159a3","Type":"ContainerStarted","Data":"650a5125a45eeef4338c5093aaafd3b3e840b12b7618d6bd3fa8c7fb2d76b99c"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.814349 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" event={"ID":"c6a94168-8373-4b69-ada3-934f7eeeb408","Type":"ContainerStarted","Data":"3d793e7d930f8bfde4f3f028c2bde56984934202f85da5624c1d248089359342"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.838991 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.840907 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.340885738 +0000 UTC m=+141.331996371 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.891266 4840 generic.go:334] "Generic (PLEG): container finished" podID="dd8094fb-e45c-481c-99b5-881758870b4b" containerID="0e9cbe49fd86be4fa79bc523df3e0adc2eff9dc1b3f1e657062b68cec90d3b6e" exitCode=0
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.891325 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" event={"ID":"dd8094fb-e45c-481c-99b5-881758870b4b","Type":"ContainerDied","Data":"0e9cbe49fd86be4fa79bc523df3e0adc2eff9dc1b3f1e657062b68cec90d3b6e"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.893616 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" event={"ID":"8f670d36-8ffd-4608-9e4e-6e4e52e5524c","Type":"ContainerStarted","Data":"9ba96a9617edb2c4c9a319cab219971bf0e4de7a457adba2eb9103c9bb9a700c"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.893636 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" event={"ID":"8f670d36-8ffd-4608-9e4e-6e4e52e5524c","Type":"ContainerStarted","Data":"f7ed3074516935a52a465a1aa3bed3d1f728ca4c24d480b48d30b38f25584fa6"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.896016 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" event={"ID":"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb","Type":"ContainerStarted","Data":"b46dd2e948ec16505c04bd194cb23790f71cf7fcfcf6588419e690798271cd7d"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.908872 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" event={"ID":"a3649bba-310a-439c-9d29-75c684e6d06a","Type":"ContainerStarted","Data":"dcc327d3531b97073cd7bc8e34dd9e735705453617acc26006af5659db6c6f20"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.938407 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" event={"ID":"64305475-68e4-4261-84eb-f72a300194f6","Type":"ContainerStarted","Data":"8dc6d9c3e481faf89701d6912c49f9754b59cfccaa00aa83efb76832678c8e26"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.938446 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" event={"ID":"64305475-68e4-4261-84eb-f72a300194f6","Type":"ContainerStarted","Data":"d6fe9e9705b994aaee42654aa5e88b7bc75dc63c77d5e5289fd3fa4a22ba450e"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.939224 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.940729 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:14 crc kubenswrapper[4840]: E1209 16:59:14.942128 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.442112789 +0000 UTC m=+141.433223522 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.946156 4840 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-zp8rr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.946198 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" podUID="64305475-68e4-4261-84eb-f72a300194f6" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.965088 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" event={"ID":"706629a8-1b29-4b41-9ab6-15a30dd32bec","Type":"ContainerStarted","Data":"963b1a545917d7a38636836845a30041076dce9e9af535bb9f81880401376711"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.992898 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-h5ltv"
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.992929 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-h5ltv" event={"ID":"acf89e2d-7f49-4872-96dd-41d47629998c","Type":"ContainerStarted","Data":"320cdb8733d2c9b6c244b3a62b206b6c1acc960acad18b4e9656ce8a61e69b48"}
Dec 09 16:59:14 crc kubenswrapper[4840]: I1209 16:59:14.998133 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfvrz"]
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.018707 4840 patch_prober.go:28] interesting pod/downloads-7954f5f757-h5ltv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.018749 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-h5ltv" podUID="acf89e2d-7f49-4872-96dd-41d47629998c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.019591 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" event={"ID":"7d00d8a0-9935-4564-95e9-022e9698358b","Type":"ContainerStarted","Data":"aa421b8d8447ede2e991ff232c3d80c8985c788819931f0edee5ba3a7edd4277"}
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.042885 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.044048 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.544033172 +0000 UTC m=+141.535143805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.061764 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vvcq2"]
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.069477 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-j5vkl"]
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.120160 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6"]
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.125943 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-8x5sv" podStartSLOduration=118.125927004 podStartE2EDuration="1m58.125927004s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.125746419 +0000 UTC m=+141.116857062" watchObservedRunningTime="2025-12-09 16:59:15.125927004 +0000 UTC m=+141.117037637"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.145077 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.147312 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.647300655 +0000 UTC m=+141.638411398 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: W1209 16:59:15.184931 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod612b0dd9_1b93_4d09_818c_3a139b6f31aa.slice/crio-2d32342daaf115e646e050496944709e5d08fb1121b31571017b47bd76df019d WatchSource:0}: Error finding container 2d32342daaf115e646e050496944709e5d08fb1121b31571017b47bd76df019d: Status 404 returned error can't find the container with id 2d32342daaf115e646e050496944709e5d08fb1121b31571017b47bd76df019d
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.246187 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.246340 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.746313338 +0000 UTC m=+141.737423971 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.246756 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.247082 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.747068041 +0000 UTC m=+141.738178674 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.255443 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-z8p7f" podStartSLOduration=118.255429136 podStartE2EDuration="1m58.255429136s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.225596678 +0000 UTC m=+141.216707331" watchObservedRunningTime="2025-12-09 16:59:15.255429136 +0000 UTC m=+141.246539769"
Dec 09 16:59:15 crc kubenswrapper[4840]: W1209 16:59:15.281300 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcbed776b_da99_4e71_9128_f5bbb8eeb100.slice/crio-d35453d81c59083a747430d05225124de43ae7bed5daac532350211b238f255a WatchSource:0}: Error finding container d35453d81c59083a747430d05225124de43ae7bed5daac532350211b238f255a: Status 404 returned error can't find the container with id d35453d81c59083a747430d05225124de43ae7bed5daac532350211b238f255a
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.353294 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.354065 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.854049998 +0000 UTC m=+141.845160631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.394092 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-8x5sv"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.394416 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-thzxd" podStartSLOduration=118.394397686 podStartE2EDuration="1m58.394397686s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.352398557 +0000 UTC m=+141.343509180" watchObservedRunningTime="2025-12-09 16:59:15.394397686 +0000 UTC m=+141.385508319"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.436712 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7l2gj" podStartSLOduration=118.436696973 podStartE2EDuration="1m58.436696973s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.43462636 +0000 UTC m=+141.425736983" watchObservedRunningTime="2025-12-09 16:59:15.436696973 +0000 UTC m=+141.427807606"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.456108 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.457807 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:15.957794275 +0000 UTC m=+141.948904908 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.503987 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" podStartSLOduration=118.50394841 podStartE2EDuration="1m58.50394841s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.502456025 +0000 UTC m=+141.493566658" watchObservedRunningTime="2025-12-09 16:59:15.50394841 +0000 UTC m=+141.495059043"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.561181 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.561538 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.061524853 +0000 UTC m=+142.052635486 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.605945 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-ngfrq"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.612182 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 09 16:59:15 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld
Dec 09 16:59:15 crc kubenswrapper[4840]: [+]process-running ok
Dec 09 16:59:15 crc kubenswrapper[4840]: healthz check failed
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.612232 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.625353 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-vv579" podStartSLOduration=117.625336465 podStartE2EDuration="1m57.625336465s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.622679324 +0000 UTC m=+141.613789957" watchObservedRunningTime="2025-12-09 16:59:15.625336465 +0000 UTC m=+141.616447098"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.662424 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.662697 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.162686282 +0000 UTC m=+142.153796915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.748526 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-ngfrq" podStartSLOduration=117.748485703 podStartE2EDuration="1m57.748485703s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.745890544 +0000 UTC m=+141.737001177" watchObservedRunningTime="2025-12-09 16:59:15.748485703 +0000 UTC m=+141.739596336"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.763279 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.763658 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.263642044 +0000 UTC m=+142.254752677 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.780557 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" podStartSLOduration=117.780541269 podStartE2EDuration="1m57.780541269s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.780286901 +0000 UTC m=+141.771397534" watchObservedRunningTime="2025-12-09 16:59:15.780541269 +0000 UTC m=+141.771651902"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.864381 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.864763 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.364752212 +0000 UTC m=+142.355862845 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.918533 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-h5ltv" podStartSLOduration=118.918515498 podStartE2EDuration="1m58.918515498s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.812684747 +0000 UTC m=+141.803795380" watchObservedRunningTime="2025-12-09 16:59:15.918515498 +0000 UTC m=+141.909626131"
Dec 09 16:59:15 crc kubenswrapper[4840]: I1209 16:59:15.967509 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:15 crc kubenswrapper[4840]: E1209 16:59:15.968317 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.468302884 +0000 UTC m=+142.459413507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.036643 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" event={"ID":"2cc781ca-e849-4d81-a786-bb095749564e","Type":"ContainerStarted","Data":"d04b9d28a6a1749b729664a947b3bccd37987c4b2de91cd8e5160df385c3459f"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.040893 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" event={"ID":"612b0dd9-1b93-4d09-818c-3a139b6f31aa","Type":"ContainerStarted","Data":"2d32342daaf115e646e050496944709e5d08fb1121b31571017b47bd76df019d"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.051721 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" event={"ID":"cbed776b-da99-4e71-9128-f5bbb8eeb100","Type":"ContainerStarted","Data":"d35453d81c59083a747430d05225124de43ae7bed5daac532350211b238f255a"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.069346 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.069658 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.569646198 +0000 UTC m=+142.560756831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.083513 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" event={"ID":"706629a8-1b29-4b41-9ab6-15a30dd32bec","Type":"ContainerStarted","Data":"3b02e919e96d11fe66f5f38e37cd0ecb62d2f63c3166059b54ca279b6a7fb7b8"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.083780 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" podStartSLOduration=118.083766078 podStartE2EDuration="1m58.083766078s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:15.918931141 +0000 UTC m=+141.910041774" watchObservedRunningTime="2025-12-09 16:59:16.083766078 +0000 UTC m=+142.074876711"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.084429 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.108560 4840 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-s9lbf container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:5443/healthz\": dial tcp 10.217.0.32:5443: connect: connection refused" start-of-body=
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.108617 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" podUID="706629a8-1b29-4b41-9ab6-15a30dd32bec" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.32:5443/healthz\": dial tcp 10.217.0.32:5443: connect: connection refused"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.120275 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" event={"ID":"1cdfeed7-7f12-43d3-aee8-3a14ab37eac7","Type":"ContainerStarted","Data":"2a9b35ed91e045729f880f4c7ba1739229621daada577d14bf9cd4fc8aea36d4"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.128928 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" event={"ID":"0eb66ea0-ca48-4f56-8911-0a048eb73a04","Type":"ContainerStarted","Data":"eee8f899d6b6f495124f9d26248ee383f1f3791ad85f498f937e46e9d63a2460"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.128983 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" event={"ID":"0eb66ea0-ca48-4f56-8911-0a048eb73a04","Type":"ContainerStarted","Data":"02ee1b64d8a1541406f9f2cc7d77ac01a2563068d8fbf4ace57259f12e8e6b1a"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.132333 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" event={"ID":"38eb149d-5a2c-49e2-9c8b-50e0c720a598","Type":"ContainerStarted","Data":"f37043c8268a4e68dfa887276630fa25379ad0d22ac916e689fcae07ed43e620"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.132362 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" event={"ID":"38eb149d-5a2c-49e2-9c8b-50e0c720a598","Type":"ContainerStarted","Data":"a043172449d06fe036765f27628f06b01b6dc300f0b7cf974ef157e7928e64f4"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.145614 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vvcq2" event={"ID":"4bb6a7d5-bada-4467-90de-dfa4e2490733","Type":"ContainerStarted","Data":"7c9892706f9cb21b883dea589451b1637e129d38895e8aa80c3c1e0f641f13aa"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.169455 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ngfrq" event={"ID":"f75c37d0-2465-4202-9cf4-981ee305fe89","Type":"ContainerStarted","Data":"30809fba4c5f2f24c52089e4a6275acf146c7c4f421896dacb041d9fdeb37924"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.171299 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.174923 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.674889092 +0000 UTC m=+142.665999715 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.287907 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" event={"ID":"7d00d8a0-9935-4564-95e9-022e9698358b","Type":"ContainerStarted","Data":"372f77317dc0205d606e8de0fb7afc7627eafad128a0a01fa9ad545f8c4897ce"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.307675 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.319061 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-9vm9j" podStartSLOduration=118.319042119 podStartE2EDuration="1m58.319042119s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.288530061 +0000 UTC m=+142.279640694" watchObservedRunningTime="2025-12-09 16:59:16.319042119 +0000 UTC m=+142.310152752"
Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.326785 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.826768324 +0000 UTC m=+142.817878957 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.337746 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-k4vwg" podStartSLOduration=118.337733248 podStartE2EDuration="1m58.337733248s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.337439439 +0000 UTC m=+142.328550082" watchObservedRunningTime="2025-12-09 16:59:16.337733248 +0000 UTC m=+142.328843881"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.340380 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" event={"ID":"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8","Type":"ContainerStarted","Data":"cca71d4dc0a45a7b96e158e85b784451b5fd5eec6088d98571be474901c512df"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.340436 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" event={"ID":"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8","Type":"ContainerStarted","Data":"3e3085ce3dfbbaa40563794c4d8ea949ffeebb491e5820c1d2c4e73f34c86e5e"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.356701 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" event={"ID":"9911858a-e920-426e-ae41-c97fb62b70c9","Type":"ContainerStarted","Data":"3d24cce29b18b4c0537fef450863ec6e302ca44d3191f36b129cd7042281858d"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.366533 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" event={"ID":"545b98b0-939f-4840-9d10-57ec468b1d62","Type":"ContainerStarted","Data":"507f172505d964eb9552fe39088b66e9791e43f308c0fdb38b7fe77a64ef88b1"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.366580 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" event={"ID":"545b98b0-939f-4840-9d10-57ec468b1d62","Type":"ContainerStarted","Data":"1e4e776d7bfbb6aeecd33f2f43f12305ee246503ee9519ec38ab7315d132cfe9"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.402744 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" event={"ID":"07b0580c-0d27-48ee-8f33-3c5d7638ac47","Type":"ContainerStarted","Data":"13a6db2af823af09680b6df8c42cb8cd4e3a4c657fc01d752827670e706fa9c3"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.416833 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.417236 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:16.917213107 +0000 UTC m=+142.908323740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.422272 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" event={"ID":"b6292462-00c0-4a89-84a6-88fe8e6fdfca","Type":"ContainerStarted","Data":"20a88fed33ffdbf9d8d7733efdf601163e9cc37ff8394271e99b82ce693ca7d2"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.422363 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" event={"ID":"b6292462-00c0-4a89-84a6-88fe8e6fdfca","Type":"ContainerStarted","Data":"be72931b3f0aa5c0a217069cc8f6ce943bbe42a2ba75f11dd67c879d8c05a3bf"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.431233 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" event={"ID":"229bd7db-2f71-4bbf-baf1-b41a4368b271","Type":"ContainerStarted","Data":"ee8bd048b8ea9546ff3f08ede6a2d8855506b19d53d39e57782beba51f884e48"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.441832 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" podStartSLOduration=119.441816086 podStartE2EDuration="1m59.441816086s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.439408933 +0000 UTC m=+142.430519566" watchObservedRunningTime="2025-12-09 16:59:16.441816086 +0000 UTC m=+142.432926719"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.483424 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" event={"ID":"47c87108-afd0-41e4-b1be-8221158b81c4","Type":"ContainerStarted","Data":"e7487bdc018dbefe76b249e3b20e4f1ef4c3a962b1e24cdb93712601f74d50cd"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.496511 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" event={"ID":"64b4f46c-b8bf-469f-8288-701d04a35911","Type":"ContainerStarted","Data":"2c43e5f5ace9d2af39da2e106eb9c6c749a4d1c60480459abed4919780220b07"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.497405 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.519168 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" event={"ID":"49c57793-db28-4be3-81ee-01570255716c","Type":"ContainerStarted","Data":"8277cd82a463dbd44bf2b6af4d657cd7d721680572ef432bf3fe02ac476a287a"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.520213 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.521024 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.021004896 +0000 UTC m=+143.012115609 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.546427 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.550928 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-h5ltv" event={"ID":"acf89e2d-7f49-4872-96dd-41d47629998c","Type":"ContainerStarted","Data":"acd134a9195ea084a5f3a2c7a386a24b280742fb4fec5de57ae0a644b4467625"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.553642 4840 patch_prober.go:28] interesting pod/downloads-7954f5f757-h5ltv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.553679 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-h5ltv" podUID="acf89e2d-7f49-4872-96dd-41d47629998c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.582513 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" event={"ID":"db36b460-4849-4b66-ad2a-c5f63dab809c","Type":"ContainerStarted","Data":"0cb38f463ee984bb5cace0137c1b604a03bb9d76ede9325fd0c56c82d4ff8a3d"}
Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.582997 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf" podStartSLOduration=118.582961982 podStartE2EDuration="1m58.582961982s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.491397805 +0000 UTC m=+142.482508438"
watchObservedRunningTime="2025-12-09 16:59:16.582961982 +0000 UTC m=+142.574072615" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.587198 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" event={"ID":"a3649bba-310a-439c-9d29-75c684e6d06a","Type":"ContainerStarted","Data":"667af435a1d89a53ded0c4467843278e059f1d88e006a8ea999d133f46618b9e"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.589670 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" event={"ID":"0978ab4b-fdc1-46ac-94e2-ead3135e1ceb","Type":"ContainerStarted","Data":"b9a8410bd2e425ea1bd25b25c49e6d4922c4dded79ab8405bd57da03240c6261"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.611219 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:16 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:16 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:16 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.611274 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.625110 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.626603 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.12658404 +0000 UTC m=+143.117694683 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.627089 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" event={"ID":"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a","Type":"ContainerStarted","Data":"bc04b702ad2cc6eef51f7aa7611e09379fea70eb17b923f5fb2143c6a5c69950"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.627128 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" event={"ID":"3cc0c386-8560-4984-aba9-a2c7f7eb6c2a","Type":"ContainerStarted","Data":"9f25a100fe1e6ad419270deb4bc89da8effb8a874a14768ed9bffb32dd681901"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.627263 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-8v7mq" event={"ID":"bbe4e537-04ac-420c-9c31-ae521a1e8de2","Type":"ContainerStarted","Data":"eb6c77ab08c40ec3e90133080cf8893069f4d135ec7d9582b92328ca8f7a48cc"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.647718 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5g2d" podStartSLOduration=119.647701833 podStartE2EDuration="1m59.647701833s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.581550809 +0000 UTC m=+142.572661442" watchObservedRunningTime="2025-12-09 16:59:16.647701833 +0000 UTC m=+142.638812466" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.651358 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wlgg5" event={"ID":"15620c93-7aea-4b1f-9073-af5d15bc231d","Type":"ContainerStarted","Data":"c89e87f3a89ae68d84229b9b3547e2ea173ad7961963a708a8440635e0973e10"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.651603 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wlgg5" event={"ID":"15620c93-7aea-4b1f-9073-af5d15bc231d","Type":"ContainerStarted","Data":"8e42fd6f73483fd2e55503b24fae1660e4c006b498119f583276cf44ed9c23e5"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.695552 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" event={"ID":"c6a94168-8373-4b69-ada3-934f7eeeb408","Type":"ContainerStarted","Data":"27cb0e0b7649114059c7db0afda2ebd5b6f4fd8d4be694044406183083ee5c75"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.701981 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" event={"ID":"185aeb05-e73d-4ece-a947-8163702dd545","Type":"ContainerStarted","Data":"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.702467 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.715121 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" event={"ID":"1aa9c070-6503-4910-939a-1cb223568209","Type":"ContainerStarted","Data":"97f7808c68194daac91a949b8cd7d95d56f17507eba3d4df7107d1126ef2af60"} Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.716687 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-phbcj" podStartSLOduration=118.716667112 podStartE2EDuration="1m58.716667112s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.65188083 +0000 UTC m=+142.642991463" watchObservedRunningTime="2025-12-09 16:59:16.716667112 +0000 UTC m=+142.707777735" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.726722 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.726868 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.727903 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-b9k6h" podStartSLOduration=118.727892674 podStartE2EDuration="1m58.727892674s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.722642474 +0000 UTC m=+142.713753107" watchObservedRunningTime="2025-12-09 16:59:16.727892674 +0000 UTC m=+142.719003297" Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.728984 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.228945316 +0000 UTC m=+143.220056009 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.730302 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.737017 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-zp8rr" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.776546 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" podStartSLOduration=118.776524714 podStartE2EDuration="1m58.776524714s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.769529931 +0000 UTC m=+142.760640564" watchObservedRunningTime="2025-12-09 16:59:16.776524714 +0000 UTC m=+142.767635347" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.828513 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.830349 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.330334522 +0000 UTC m=+143.321445155 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.903297 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.913838 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-g5dvm" podStartSLOduration=118.913815522 podStartE2EDuration="1m58.913815522s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.868835003 +0000 UTC m=+142.859945636" watchObservedRunningTime="2025-12-09 16:59:16.913815522 +0000 UTC m=+142.904926155" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.915038 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhqph" podStartSLOduration=118.91503045 podStartE2EDuration="1m58.91503045s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.898186737 +0000 UTC m=+142.889297370" watchObservedRunningTime="2025-12-09 16:59:16.91503045 +0000 UTC m=+142.906141083" Dec 09 16:59:16 crc kubenswrapper[4840]: I1209 16:59:16.931712 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:16 crc kubenswrapper[4840]: E1209 16:59:16.932006 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.431988346 +0000 UTC m=+143.423098979 (durationBeforeRetry 500ms). 
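
[note] The pod_startup_latency_tracker values are internally consistent and can be recomputed from the fields they print: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp (image pull time would be excluded, but both pull timestamps here are the zero value 0001-01-01, so nothing is subtracted). Reproducing the machine-api-operator figures from the entry above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied verbatim from the machine-api-operator entry.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, _ := time.Parse(layout, "2025-12-09 16:57:18 +0000 UTC")
        observed, _ := time.Parse(layout, "2025-12-09 16:59:16.913815522 +0000 UTC")
        d := observed.Sub(created)
        fmt.Println(d)           // 1m58.913815522s == podStartE2EDuration
        fmt.Println(d.Seconds()) // 118.913815522   == podStartSLOduration
    }
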
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.028585 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" podStartSLOduration=119.028567585 podStartE2EDuration="1m59.028567585s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:16.987672221 +0000 UTC m=+142.978782854" watchObservedRunningTime="2025-12-09 16:59:17.028567585 +0000 UTC m=+143.019678218" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.033916 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.034316 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.53430203 +0000 UTC m=+143.525412663 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.090231 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p4bwb" podStartSLOduration=119.090215442 podStartE2EDuration="1m59.090215442s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.027374629 +0000 UTC m=+143.018485262" watchObservedRunningTime="2025-12-09 16:59:17.090215442 +0000 UTC m=+143.081326075" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.136721 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.137092 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.637080168 +0000 UTC m=+143.628190801 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.142537 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" podStartSLOduration=119.142522394 podStartE2EDuration="1m59.142522394s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.091857462 +0000 UTC m=+143.082968095" watchObservedRunningTime="2025-12-09 16:59:17.142522394 +0000 UTC m=+143.133633027" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.143010 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-wlgg5" podStartSLOduration=7.143006149 podStartE2EDuration="7.143006149s" podCreationTimestamp="2025-12-09 16:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.14041249 +0000 UTC m=+143.131523123" watchObservedRunningTime="2025-12-09 16:59:17.143006149 +0000 UTC m=+143.134116782" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.233563 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mtqnx" podStartSLOduration=119.233544734 podStartE2EDuration="1m59.233544734s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.215046181 +0000 UTC m=+143.206156814" watchObservedRunningTime="2025-12-09 16:59:17.233544734 +0000 UTC m=+143.224655367" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.237869 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.238300 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.738280648 +0000 UTC m=+143.729391281 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.266859 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-f547s" podStartSLOduration=119.266844477 podStartE2EDuration="1m59.266844477s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.266462405 +0000 UTC m=+143.257573038" watchObservedRunningTime="2025-12-09 16:59:17.266844477 +0000 UTC m=+143.257955110" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.339856 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.340220 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.84021047 +0000 UTC m=+143.831321093 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.346390 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-8v7mq" podStartSLOduration=7.346375707 podStartE2EDuration="7.346375707s" podCreationTimestamp="2025-12-09 16:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.344287494 +0000 UTC m=+143.335398127" watchObservedRunningTime="2025-12-09 16:59:17.346375707 +0000 UTC m=+143.337486340" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.444161 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" podStartSLOduration=119.444147103 podStartE2EDuration="1m59.444147103s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.388410117 +0000 UTC m=+143.379520750" watchObservedRunningTime="2025-12-09 16:59:17.444147103 +0000 UTC m=+143.435257736" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.447426 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.447516 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.947497935 +0000 UTC m=+143.938608568 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.447633 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.447907 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:17.947900478 +0000 UTC m=+143.939011111 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.550553 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.551135 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.051119469 +0000 UTC m=+144.042230092 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.606843 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:17 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:17 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:17 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.606892 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.654704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.655122 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.155107174 +0000 UTC m=+144.146217807 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.735469 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" event={"ID":"9911858a-e920-426e-ae41-c97fb62b70c9","Type":"ContainerStarted","Data":"7c4d7f48f1685f3e34942523949788c3a6a1db20b78979e92891f483436afec8"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.743725 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" event={"ID":"dd8094fb-e45c-481c-99b5-881758870b4b","Type":"ContainerStarted","Data":"369e19177c254bcdfc52591956d98bcc539742a35cae389c8e5aeda3139af0de"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.743863 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.748926 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" event={"ID":"229bd7db-2f71-4bbf-baf1-b41a4368b271","Type":"ContainerStarted","Data":"ef94cc4b65b931d4443855f4d5ceaa516bc5f0b4824235b5b986fc0acca8afc2"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.748971 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" event={"ID":"229bd7db-2f71-4bbf-baf1-b41a4368b271","Type":"ContainerStarted","Data":"78e5dde4a972cedaf794eae3132beb2beb5fc9cb4b6cdd78103d89145785e29b"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.755788 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.755997 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.255955204 +0000 UTC m=+144.247065837 (durationBeforeRetry 500ms). 
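
[note] The csi-hostpathplugin containers starting above are the eventual fix for the recurring driver-not-found errors: once the plugin's registrar sidecar is up, it creates a registration socket under the kubelet's plugins_registry directory, the kubelet adds kubevirt.io.hostpath-provisioner to its driver list, and the pending mount for image-registry-697d97f7c8-bczl5 can finally proceed. A small sketch for listing what has registered on a node, assuming the default kubelet root directory /var/lib/kubelet:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Registration sockets appear here once a driver's registrar
        // sidecar is up; the path assumes the default kubelet root.
        entries, err := os.ReadDir("/var/lib/kubelet/plugins_registry")
        if err != nil {
            fmt.Println("cannot read plugins_registry:", err)
            return
        }
        for _, e := range entries {
            fmt.Println(e.Name()) // e.g. kubevirt.io.hostpath-provisioner-reg.sock
        }
    }
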
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.756184 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.756540 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.256533062 +0000 UTC m=+144.247643695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.757318 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" event={"ID":"07b0580c-0d27-48ee-8f33-3c5d7638ac47","Type":"ContainerStarted","Data":"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.758025 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.758831 4840 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2ckb5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.758868 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.764614 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-nw6vb" event={"ID":"1aa9c070-6503-4910-939a-1cb223568209","Type":"ContainerStarted","Data":"636297a6616848ba02994835df6c18ab23a56c72a2c0834fb837bbece65257ec"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.767323 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" 
event={"ID":"612b0dd9-1b93-4d09-818c-3a139b6f31aa","Type":"ContainerStarted","Data":"6eb3af8abba3b3192d6d444b81590ebc107e31ca14c78db0c3296ef6c3ff50fd"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.772876 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" event={"ID":"a00d2d49-aeb5-41a8-a5c5-6a9e6627fdc8","Type":"ContainerStarted","Data":"00248aef52a7b437a9d1f17ece66e16659eb0b49134131051773aa0c8ba4f906"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.772952 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.774592 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" event={"ID":"b6292462-00c0-4a89-84a6-88fe8e6fdfca","Type":"ContainerStarted","Data":"9b66057fd82e33bc33c862f8316d3f735f91d2870cd6fe5137f8104715a82847"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.779119 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tf562" event={"ID":"c6a94168-8373-4b69-ada3-934f7eeeb408","Type":"ContainerStarted","Data":"e8ce6e1c13d02922f07e2478cd464b9ba9767f24e5aec64951b62d99837cecab"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.784456 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" event={"ID":"30551113-d3e0-4335-910a-433ea706e8e2","Type":"ContainerStarted","Data":"182c1b133c0850e8c859613ecff8e264375f1995efe05af96ed171f3301c9805"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.784510 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" event={"ID":"30551113-d3e0-4335-910a-433ea706e8e2","Type":"ContainerStarted","Data":"73ddcbfdefd08f9fd2ab48762e5c5d73c3f57e1928e8a0f5d148b87590a5bb06"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.789228 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" event={"ID":"cbed776b-da99-4e71-9128-f5bbb8eeb100","Type":"ContainerStarted","Data":"459b07855ea8bf94545fcb00a8d6804620e23c88e6fb1e03fae11faf960de68c"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.789284 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" event={"ID":"cbed776b-da99-4e71-9128-f5bbb8eeb100","Type":"ContainerStarted","Data":"b8b636f4acf9266d52835322939d5cda7ab38a5f80e9c453f919a54dddf9fa97"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.792701 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bxntg" event={"ID":"a3649bba-310a-439c-9d29-75c684e6d06a","Type":"ContainerStarted","Data":"1b79fe4891d7efd36bff962b972725e7dd516c3d760fa2f6bfea7f0ce4c118dd"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.803100 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" event={"ID":"2cc781ca-e849-4d81-a786-bb095749564e","Type":"ContainerStarted","Data":"efa0d172b473872abea4e13b5cd437ec8bfa054a50fef938ec6e009e3515d674"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.815129 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vvcq2" event={"ID":"4bb6a7d5-bada-4467-90de-dfa4e2490733","Type":"ContainerStarted","Data":"dcd87628f4ce32352a53d3b9cb7999bf8381aada87fca7f3dc6f37daf1244e0c"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.815167 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vvcq2" event={"ID":"4bb6a7d5-bada-4467-90de-dfa4e2490733","Type":"ContainerStarted","Data":"a41ae91dc7371c8624ed7af528128861e516f72604917c5a0a61b81af8449466"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.815679 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.827718 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" event={"ID":"25ed38df-aac2-41d6-a51b-694af6b9cbf3","Type":"ContainerStarted","Data":"6dee308d0819b507d5a382108aa58b7ccd3ff30be63ec27f8ddc0d72c136fd1b"} Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.828482 4840 patch_prober.go:28] interesting pod/downloads-7954f5f757-h5ltv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.828520 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-h5ltv" podUID="acf89e2d-7f49-4872-96dd-41d47629998c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.857096 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.859072 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.359046032 +0000 UTC m=+144.350156695 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.877732 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" podStartSLOduration=120.87771188 podStartE2EDuration="2m0.87771188s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.824710217 +0000 UTC m=+143.815820850" watchObservedRunningTime="2025-12-09 16:59:17.87771188 +0000 UTC m=+143.868822513" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.899095 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-5qngm" podStartSLOduration=119.89906721 podStartE2EDuration="1m59.89906721s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.888361304 +0000 UTC m=+143.879471937" watchObservedRunningTime="2025-12-09 16:59:17.89906721 +0000 UTC m=+143.890177893" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.924830 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" podStartSLOduration=119.924816594 podStartE2EDuration="1m59.924816594s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.922874885 +0000 UTC m=+143.913985508" watchObservedRunningTime="2025-12-09 16:59:17.924816594 +0000 UTC m=+143.915927227" Dec 09 16:59:17 crc kubenswrapper[4840]: I1209 16:59:17.962261 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:17 crc kubenswrapper[4840]: E1209 16:59:17.978713 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.478694774 +0000 UTC m=+144.469805407 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.016975 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-njt76" podStartSLOduration=120.016940858 podStartE2EDuration="2m0.016940858s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:17.957984533 +0000 UTC m=+143.949095166" watchObservedRunningTime="2025-12-09 16:59:18.016940858 +0000 UTC m=+144.008051491" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.045943 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2x6tv" podStartSLOduration=120.04592638 podStartE2EDuration="2m0.04592638s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.044681452 +0000 UTC m=+144.035792085" watchObservedRunningTime="2025-12-09 16:59:18.04592638 +0000 UTC m=+144.037037003" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.046983 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-crvj6" podStartSLOduration=120.046977042 podStartE2EDuration="2m0.046977042s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.019233397 +0000 UTC m=+144.010344030" watchObservedRunningTime="2025-12-09 16:59:18.046977042 +0000 UTC m=+144.038087675" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.065392 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.065802 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.565785724 +0000 UTC m=+144.556896357 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.123475 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-vvcq2" podStartSLOduration=8.12345457 podStartE2EDuration="8.12345457s" podCreationTimestamp="2025-12-09 16:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.071323373 +0000 UTC m=+144.062434006" watchObservedRunningTime="2025-12-09 16:59:18.12345457 +0000 UTC m=+144.114565203" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.141647 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" podStartSLOduration=120.141629933 podStartE2EDuration="2m0.141629933s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.131571837 +0000 UTC m=+144.122682470" watchObservedRunningTime="2025-12-09 16:59:18.141629933 +0000 UTC m=+144.132740566" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.141911 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-br44r"] Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.142808 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-br44r" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.150257 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.169928 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.170231 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.670219473 +0000 UTC m=+144.661330106 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.174416 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" podStartSLOduration=121.17440635 podStartE2EDuration="2m1.17440635s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.170705768 +0000 UTC m=+144.161816401" watchObservedRunningTime="2025-12-09 16:59:18.17440635 +0000 UTC m=+144.165516983"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.175393 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-br44r"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.210638 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" podStartSLOduration=120.210618393 podStartE2EDuration="2m0.210618393s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.208456667 +0000 UTC m=+144.199567300" watchObservedRunningTime="2025-12-09 16:59:18.210618393 +0000 UTC m=+144.201729026"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.234191 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zfvrz" podStartSLOduration=120.23417648 podStartE2EDuration="2m0.23417648s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:18.231311362 +0000 UTC m=+144.222421995" watchObservedRunningTime="2025-12-09 16:59:18.23417648 +0000 UTC m=+144.225287113"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.271519 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.271691 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.271778 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wv7q5\" (UniqueName: \"kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.271819 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.271947 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.771929179 +0000 UTC m=+144.763039812 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.327993 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rgpwz"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.328878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.333428 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.348036 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rgpwz"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373366 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wv7q5\" (UniqueName: \"kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373417 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373440 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373479 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373509 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373552 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.373608 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d8ps\" (UniqueName: \"kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.374233 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.374457 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.874447299 +0000 UTC m=+144.865557932 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.374792 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.410327 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wv7q5\" (UniqueName: \"kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5\") pod \"certified-operators-br44r\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.474889 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.475151 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.975123683 +0000 UTC m=+144.966234316 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.475406 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d8ps\" (UniqueName: \"kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.475518 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.475628 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.475731 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.476220 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.476746 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:18.976735432 +0000 UTC m=+144.967846066 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.477194 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.494111 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-br44r"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.505456 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d8ps\" (UniqueName: \"kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps\") pod \"community-operators-rgpwz\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.512657 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-78kws"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.513748 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.549140 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-78kws"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.584385 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.584600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.584668 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkdgq\" (UniqueName: \"kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.584699 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.584883 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.084869094 +0000 UTC m=+145.075979727 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.607842 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 09 16:59:18 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld
Dec 09 16:59:18 crc kubenswrapper[4840]: [+]process-running ok
Dec 09 16:59:18 crc kubenswrapper[4840]: healthz check failed
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.607909 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.645784 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.675098 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-s9lbf"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.686053 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkdgq\" (UniqueName: \"kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.686111 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.686130 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.686179 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.686580 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.687044 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.687254 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.18724431 +0000 UTC m=+145.178354943 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.725291 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkdgq\" (UniqueName: \"kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq\") pod \"certified-operators-78kws\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.727023 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.728046 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.749930 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.787819 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.788172 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.288155261 +0000 UTC m=+145.279265894 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.788460 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.788554 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.788648 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8cmr\" (UniqueName: \"kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.788737 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.789066 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.289058329 +0000 UTC m=+145.280168962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.858215 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-78kws"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.859393 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" event={"ID":"9911858a-e920-426e-ae41-c97fb62b70c9","Type":"ContainerStarted","Data":"c8cf45c0ef6585267b053f1353b1f6996f05b0c227527fa21054024ba0f493f2"}
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.861593 4840 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2ckb5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.861616 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.889770 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.890138 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.890285 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.890380 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8cmr\" (UniqueName: \"kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.890762 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.390748194 +0000 UTC m=+145.381858827 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.891211 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.891362 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.960863 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8cmr\" (UniqueName: \"kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr\") pod \"community-operators-pcvgz\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") " pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:18 crc kubenswrapper[4840]: I1209 16:59:18.991574 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:18 crc kubenswrapper[4840]: E1209 16:59:18.993096 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.493081349 +0000 UTC m=+145.484191982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.078218 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.099502 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.099809 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.599785386 +0000 UTC m=+145.590896009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.200677 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.201009 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.700996927 +0000 UTC m=+145.692107560 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.270658 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rgpwz"]
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.302266 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.303030 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.803014052 +0000 UTC m=+145.794124685 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.404602 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.405061 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:19.905045508 +0000 UTC m=+145.896156141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.465173 4840 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.506425 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.506624 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:20.006586088 +0000 UTC m=+145.997696721 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.506874 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.507230 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:20.007222188 +0000 UTC m=+145.998332821 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.575321 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-br44r"]
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.607578 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.616522 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-78kws"]
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.616884 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 09 16:59:19 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld
Dec 09 16:59:19 crc kubenswrapper[4840]: [+]process-running ok
Dec 09 16:59:19 crc kubenswrapper[4840]: healthz check failed
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.617076 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.617305 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:20.117288248 +0000 UTC m=+146.108398881 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.629374 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.636142 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 16:59:20.136124381 +0000 UTC m=+146.127235014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bczl5" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.739547 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:19 crc kubenswrapper[4840]: E1209 16:59:19.739855 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 16:59:20.239840648 +0000 UTC m=+146.230951281 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.813174 4840 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-09T16:59:19.465199619Z","Handler":null,"Name":""}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.821814 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.829506 4840 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.829546 4840 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.842401 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.869623 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.869673 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.880844 4840 generic.go:334] "Generic (PLEG): container finished" podID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerID="b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753" exitCode=0
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.880914 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerDied","Data":"b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.880938 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerStarted","Data":"f497737cfe08b09b02eab01cc5a16700889e8af695db1542d37dfa1541d40cf9"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.887683 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.899957 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" event={"ID":"9911858a-e920-426e-ae41-c97fb62b70c9","Type":"ContainerStarted","Data":"f9ea82e700c5d464d0272c94f7fee5fcec19e50c54e77d44dc7e350877081b48"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.906225 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerStarted","Data":"bf4cf7db369f26800bc5d8a3f6f9574989722631093dd532faca1027601c80dd"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.907352 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerStarted","Data":"5f62ad2d5bae7ccd19d244b186730c7a088fd62a99a9f37a8bb49629306fee3a"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.909757 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerStarted","Data":"86999334f9bd87b1f7c4086e3783d7ea8d4ddbda774abf4902a7259fd2e9f03a"}
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.912428 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.913942 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.914774 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.917344 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.918585 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Dec 09 16:59:19 crc kubenswrapper[4840]: I1209 16:59:19.922272 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.046040 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.046087 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.105650 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.106533 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.108642 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.121011 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.147118 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.147155 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.147187 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.152310 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bczl5\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.167184 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.228153 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.243704 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.247438 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.247699 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.247752 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.247899 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ngxr\" (UniqueName: \"kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.261611 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.352952 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.353551 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.353557 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.353762 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ngxr\" (UniqueName: \"kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.354054 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.398844 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ngxr\" (UniqueName: \"kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr\") pod \"redhat-marketplace-xpnvf\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.418226 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpnvf"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.447552 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.537168 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.553411 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p5t7p"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.575701 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.601038 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.605222 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 09 16:59:20 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld
Dec 09 16:59:20 crc kubenswrapper[4840]: [+]process-running ok
Dec 09 16:59:20 crc kubenswrapper[4840]: healthz check failed
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.605264 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.655926 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.668649 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d88z5\" (UniqueName: \"kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.668716 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.668746 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.693884 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"]
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.771084 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d88z5\" (UniqueName: \"kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p"
Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.771167 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName:
\"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.771208 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.771670 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.772410 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.794523 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d88z5\" (UniqueName: \"kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5\") pod \"redhat-marketplace-p5t7p\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.883682 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.924046 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerStarted","Data":"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.924101 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerStarted","Data":"de2310ad95f7f0966e2f47d1b7508945502cf082c80befb40fee228444b66610"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.926083 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" event={"ID":"c648416a-e4c7-4ce4-97e5-33393cead15e","Type":"ContainerStarted","Data":"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.926126 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" event={"ID":"c648416a-e4c7-4ce4-97e5-33393cead15e","Type":"ContainerStarted","Data":"80f4c4336073bcafd279da52282a8e7db8f504efefafb31fe888081272f0dbd0"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.926179 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.929459 4840 generic.go:334] "Generic (PLEG): container finished" podID="0eb66ea0-ca48-4f56-8911-0a048eb73a04" containerID="eee8f899d6b6f495124f9d26248ee383f1f3791ad85f498f937e46e9d63a2460" exitCode=0 Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.929513 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" event={"ID":"0eb66ea0-ca48-4f56-8911-0a048eb73a04","Type":"ContainerDied","Data":"eee8f899d6b6f495124f9d26248ee383f1f3791ad85f498f937e46e9d63a2460"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.946448 4840 generic.go:334] "Generic (PLEG): container finished" podID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerID="d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff" exitCode=0 Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.946548 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerDied","Data":"d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.948499 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52a55433-0acc-41ad-9132-31cfb5375627","Type":"ContainerStarted","Data":"b20f417b5bbec212d7e5af9e95509c4983e4196e28bc03b64b5a78cc1096a7bd"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.950660 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" event={"ID":"9911858a-e920-426e-ae41-c97fb62b70c9","Type":"ContainerStarted","Data":"074cdc2259bce8d0553564968e97e8da3610a88c67dac9dc6c91ce96ba611a56"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.952900 4840 generic.go:334] "Generic (PLEG): container finished" 
podID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerID="e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137" exitCode=0 Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.952978 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerDied","Data":"e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.954555 4840 generic.go:334] "Generic (PLEG): container finished" podID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerID="c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63" exitCode=0 Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.956359 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerDied","Data":"c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63"} Dec 09 16:59:20 crc kubenswrapper[4840]: I1209 16:59:20.981265 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" podStartSLOduration=122.981250922 podStartE2EDuration="2m2.981250922s" podCreationTimestamp="2025-12-09 16:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:20.978345424 +0000 UTC m=+146.969456077" watchObservedRunningTime="2025-12-09 16:59:20.981250922 +0000 UTC m=+146.972361555" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.050481 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-j5vkl" podStartSLOduration=11.050463439 podStartE2EDuration="11.050463439s" podCreationTimestamp="2025-12-09 16:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:21.049784668 +0000 UTC m=+147.040895291" watchObservedRunningTime="2025-12-09 16:59:21.050463439 +0000 UTC m=+147.041574062" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.335281 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"] Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.538524 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.539864 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.541826 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.554467 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.585038 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.585089 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.585992 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.604119 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:21 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:21 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:21 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.604167 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.606132 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.686899 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.686999 4840 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.687023 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn8b5\" (UniqueName: \"kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.687046 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.687064 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.692999 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.694221 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.788748 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.788847 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn8b5\" (UniqueName: \"kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.788884 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " 
pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.789596 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.789650 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.806866 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn8b5\" (UniqueName: \"kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5\") pod \"redhat-operators-w2jxv\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.836867 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.859300 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.859526 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-b82t4" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.859984 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.872191 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.921579 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"] Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.923702 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.957149 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"] Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.996729 4840 generic.go:334] "Generic (PLEG): container finished" podID="52a55433-0acc-41ad-9132-31cfb5375627" containerID="e4460a131c2ce95f267b0ccfd9aaf2f9a8bc9c4ec25ac4f597f3c4556190ec17" exitCode=0 Dec 09 16:59:21 crc kubenswrapper[4840]: I1209 16:59:21.996810 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52a55433-0acc-41ad-9132-31cfb5375627","Type":"ContainerDied","Data":"e4460a131c2ce95f267b0ccfd9aaf2f9a8bc9c4ec25ac4f597f3c4556190ec17"} Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.003271 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a467268-207b-41e8-927d-8bb4ce05c367" containerID="f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906" exitCode=0 Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.003336 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerDied","Data":"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906"} Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.012118 4840 generic.go:334] "Generic (PLEG): container finished" podID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerID="5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040" exitCode=0 Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.012665 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerDied","Data":"5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040"} Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.012996 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerStarted","Data":"1e840a821d249c7303b2a3dc3f3081075f45de29597e409686ca2741a0c02162"} Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.093243 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm8st\" (UniqueName: \"kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.093354 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.093416 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 
crc kubenswrapper[4840]: I1209 16:59:22.194705 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm8st\" (UniqueName: \"kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.194785 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.194836 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.195273 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.195587 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.215079 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm8st\" (UniqueName: \"kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st\") pod \"redhat-operators-xjwsx\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") " pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.288825 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.311302 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.398352 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume\") pod \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.398868 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x562z\" (UniqueName: \"kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z\") pod \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.398914 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume\") pod \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\" (UID: \"0eb66ea0-ca48-4f56-8911-0a048eb73a04\") " Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.398862 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume" (OuterVolumeSpecName: "config-volume") pod "0eb66ea0-ca48-4f56-8911-0a048eb73a04" (UID: "0eb66ea0-ca48-4f56-8911-0a048eb73a04"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.400871 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0eb66ea0-ca48-4f56-8911-0a048eb73a04-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.402141 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0eb66ea0-ca48-4f56-8911-0a048eb73a04" (UID: "0eb66ea0-ca48-4f56-8911-0a048eb73a04"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.408739 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z" (OuterVolumeSpecName: "kube-api-access-x562z") pod "0eb66ea0-ca48-4f56-8911-0a048eb73a04" (UID: "0eb66ea0-ca48-4f56-8911-0a048eb73a04"). InnerVolumeSpecName "kube-api-access-x562z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.502742 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x562z\" (UniqueName: \"kubernetes.io/projected/0eb66ea0-ca48-4f56-8911-0a048eb73a04-kube-api-access-x562z\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.502774 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0eb66ea0-ca48-4f56-8911-0a048eb73a04-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.508239 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 16:59:22 crc kubenswrapper[4840]: W1209 16:59:22.517583 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e4ee4ac_929f_41ef_b1ed_ea6e070793bd.slice/crio-f566945c8b34e03ca8ba769da90b9a8fe1677257767542e6df585b8adb5b4610 WatchSource:0}: Error finding container f566945c8b34e03ca8ba769da90b9a8fe1677257767542e6df585b8adb5b4610: Status 404 returned error can't find the container with id f566945c8b34e03ca8ba769da90b9a8fe1677257767542e6df585b8adb5b4610 Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.587441 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.587498 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.589042 4840 patch_prober.go:28] interesting pod/console-f9d7485db-z8p7f container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.589091 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z8p7f" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.604271 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:22 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:22 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:22 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.604328 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:22 crc kubenswrapper[4840]: W1209 16:59:22.632758 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-d6698a24c6d6b6c0e4a520511d95a914fb24f44ba199b4c7d43ce40d1378b3ae WatchSource:0}: Error finding container 
d6698a24c6d6b6c0e4a520511d95a914fb24f44ba199b4c7d43ce40d1378b3ae: Status 404 returned error can't find the container with id d6698a24c6d6b6c0e4a520511d95a914fb24f44ba199b4c7d43ce40d1378b3ae Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.692913 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.695533 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.698948 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.720958 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.721033 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.726551 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:22 crc kubenswrapper[4840]: I1209 16:59:22.780594 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"] Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.065103 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.065150 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc" event={"ID":"0eb66ea0-ca48-4f56-8911-0a048eb73a04","Type":"ContainerDied","Data":"02ee1b64d8a1541406f9f2cc7d77ac01a2563068d8fbf4ace57259f12e8e6b1a"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.065901 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02ee1b64d8a1541406f9f2cc7d77ac01a2563068d8fbf4ace57259f12e8e6b1a" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.074022 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerStarted","Data":"3b311f2701ba2a91e586dd89e60c2cf24077fbe84ea14435e70afce2e06ee4d6"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.083715 4840 generic.go:334] "Generic (PLEG): container finished" podID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerID="faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d" exitCode=0 Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.083919 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerDied","Data":"faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.083977 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerStarted","Data":"f566945c8b34e03ca8ba769da90b9a8fe1677257767542e6df585b8adb5b4610"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 
16:59:23.090474 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5a3a11c5199ac618929af84df614604ee0ed29ba48641e3cc8f82692ef9c649f"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.090508 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"d6698a24c6d6b6c0e4a520511d95a914fb24f44ba199b4c7d43ce40d1378b3ae"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.093884 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3523491cd0796ed3c0543255c436ebea84aab4e29b7fc325068585b758fab365"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.093904 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d8b6e740c552d0333e4ca978193006a6f4da27f3b800e2f4160fedcc611a13f8"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.094272 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.097515 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"41af205af7a486cc76abb3a99499c44beb1071e6df9f4afb5c8d16b99d22dccf"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.097569 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"690b2866b57a3e5f2fc42e48ed16ad1ec0bc57c5d8b1b3d74a0e3c518edde200"} Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.105687 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-4whfq" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.108843 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-qfszn" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.206149 4840 patch_prober.go:28] interesting pod/downloads-7954f5f757-h5ltv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.206198 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-h5ltv" podUID="acf89e2d-7f49-4872-96dd-41d47629998c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.206231 4840 patch_prober.go:28] interesting pod/downloads-7954f5f757-h5ltv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 
10.217.0.14:8080: connect: connection refused" start-of-body= Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.206267 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-h5ltv" podUID="acf89e2d-7f49-4872-96dd-41d47629998c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.489257 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.600426 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.603931 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:23 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:23 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:23 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.604019 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.621850 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir\") pod \"52a55433-0acc-41ad-9132-31cfb5375627\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.621957 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access\") pod \"52a55433-0acc-41ad-9132-31cfb5375627\" (UID: \"52a55433-0acc-41ad-9132-31cfb5375627\") " Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.621994 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "52a55433-0acc-41ad-9132-31cfb5375627" (UID: "52a55433-0acc-41ad-9132-31cfb5375627"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.622250 4840 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52a55433-0acc-41ad-9132-31cfb5375627-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.626401 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "52a55433-0acc-41ad-9132-31cfb5375627" (UID: "52a55433-0acc-41ad-9132-31cfb5375627"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:59:23 crc kubenswrapper[4840]: I1209 16:59:23.723237 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52a55433-0acc-41ad-9132-31cfb5375627-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.110232 4840 generic.go:334] "Generic (PLEG): container finished" podID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerID="4926fc9a5a23f2997cda6469f5adf396fbe00ee6bb112db086239d1331fd7a37" exitCode=0 Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.110501 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerDied","Data":"4926fc9a5a23f2997cda6469f5adf396fbe00ee6bb112db086239d1331fd7a37"} Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.143431 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52a55433-0acc-41ad-9132-31cfb5375627","Type":"ContainerDied","Data":"b20f417b5bbec212d7e5af9e95509c4983e4196e28bc03b64b5a78cc1096a7bd"} Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.143475 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b20f417b5bbec212d7e5af9e95509c4983e4196e28bc03b64b5a78cc1096a7bd" Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.143520 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.603483 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:24 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:24 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:24 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:24 crc kubenswrapper[4840]: I1209 16:59:24.603544 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.272473 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 16:59:25 crc kubenswrapper[4840]: E1209 16:59:25.272863 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb66ea0-ca48-4f56-8911-0a048eb73a04" containerName="collect-profiles" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.272881 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb66ea0-ca48-4f56-8911-0a048eb73a04" containerName="collect-profiles" Dec 09 16:59:25 crc kubenswrapper[4840]: E1209 16:59:25.272893 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52a55433-0acc-41ad-9132-31cfb5375627" containerName="pruner" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.272900 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="52a55433-0acc-41ad-9132-31cfb5375627" containerName="pruner" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.273038 4840 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="52a55433-0acc-41ad-9132-31cfb5375627" containerName="pruner" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.273050 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eb66ea0-ca48-4f56-8911-0a048eb73a04" containerName="collect-profiles" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.273751 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.275251 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.275373 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.276847 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.371569 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.371671 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.472547 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.472628 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.472788 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.496689 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.597982 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.604373 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:25 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:25 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:25 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.604407 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:25 crc kubenswrapper[4840]: I1209 16:59:25.907549 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 16:59:26 crc kubenswrapper[4840]: I1209 16:59:26.604571 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:26 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:26 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:26 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:26 crc kubenswrapper[4840]: I1209 16:59:26.604982 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:27 crc kubenswrapper[4840]: I1209 16:59:27.613936 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:27 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:27 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:27 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:27 crc kubenswrapper[4840]: I1209 16:59:27.614033 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:28 crc kubenswrapper[4840]: I1209 16:59:28.602209 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:28 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:28 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:28 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:28 crc kubenswrapper[4840]: I1209 16:59:28.602269 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with 
statuscode: 500" Dec 09 16:59:28 crc kubenswrapper[4840]: I1209 16:59:28.808490 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-vvcq2" Dec 09 16:59:29 crc kubenswrapper[4840]: I1209 16:59:29.603387 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:29 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:29 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:29 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:29 crc kubenswrapper[4840]: I1209 16:59:29.603624 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:30 crc kubenswrapper[4840]: I1209 16:59:30.602591 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:30 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:30 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:30 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:30 crc kubenswrapper[4840]: I1209 16:59:30.602641 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:31 crc kubenswrapper[4840]: I1209 16:59:31.602787 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:31 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:31 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:31 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:31 crc kubenswrapper[4840]: I1209 16:59:31.602843 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:32 crc kubenswrapper[4840]: I1209 16:59:32.384957 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 16:59:32 crc kubenswrapper[4840]: I1209 16:59:32.588749 4840 patch_prober.go:28] interesting pod/console-f9d7485db-z8p7f container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Dec 09 16:59:32 crc kubenswrapper[4840]: I1209 16:59:32.588836 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z8p7f" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection 
refused" Dec 09 16:59:32 crc kubenswrapper[4840]: I1209 16:59:32.603705 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:32 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:32 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:32 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:32 crc kubenswrapper[4840]: I1209 16:59:32.603786 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:33 crc kubenswrapper[4840]: I1209 16:59:33.216886 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-h5ltv" Dec 09 16:59:33 crc kubenswrapper[4840]: I1209 16:59:33.603595 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:33 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:33 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:33 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:33 crc kubenswrapper[4840]: I1209 16:59:33.603696 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:34 crc kubenswrapper[4840]: I1209 16:59:34.037092 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:59:34 crc kubenswrapper[4840]: I1209 16:59:34.037594 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:59:34 crc kubenswrapper[4840]: I1209 16:59:34.605597 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:34 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:34 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:34 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:34 crc kubenswrapper[4840]: I1209 16:59:34.605705 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:35 crc kubenswrapper[4840]: W1209 16:59:35.363581 4840 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-podaa22f122_455f_42d6_9ca2_f00291ab637a.slice/crio-e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8 WatchSource:0}: Error finding container e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8: Status 404 returned error can't find the container with id e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8 Dec 09 16:59:35 crc kubenswrapper[4840]: I1209 16:59:35.602998 4840 patch_prober.go:28] interesting pod/router-default-5444994796-ngfrq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 16:59:35 crc kubenswrapper[4840]: [-]has-synced failed: reason withheld Dec 09 16:59:35 crc kubenswrapper[4840]: [+]process-running ok Dec 09 16:59:35 crc kubenswrapper[4840]: healthz check failed Dec 09 16:59:35 crc kubenswrapper[4840]: I1209 16:59:35.603109 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ngfrq" podUID="f75c37d0-2465-4202-9cf4-981ee305fe89" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 16:59:36 crc kubenswrapper[4840]: I1209 16:59:36.237951 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"aa22f122-455f-42d6-9ca2-f00291ab637a","Type":"ContainerStarted","Data":"e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8"} Dec 09 16:59:36 crc kubenswrapper[4840]: I1209 16:59:36.605160 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:36 crc kubenswrapper[4840]: I1209 16:59:36.617052 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-ngfrq" Dec 09 16:59:40 crc kubenswrapper[4840]: I1209 16:59:40.233083 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 16:59:40 crc kubenswrapper[4840]: I1209 16:59:40.452923 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:40 crc kubenswrapper[4840]: I1209 16:59:40.459667 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2099e918-a035-4659-8247-971e3e59c6ef-metrics-certs\") pod \"network-metrics-daemon-hc4xq\" (UID: \"2099e918-a035-4659-8247-971e3e59c6ef\") " pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:40 crc kubenswrapper[4840]: I1209 16:59:40.486073 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hc4xq" Dec 09 16:59:42 crc kubenswrapper[4840]: I1209 16:59:42.775373 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:42 crc kubenswrapper[4840]: I1209 16:59:42.779629 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.014135 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.014669 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7d8ps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-rgpwz_openshift-marketplace(470d5b30-6a3d-4d02-9ef4-ce35ea66af80): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.016065 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-rgpwz" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.181927 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.182353 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h8cmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-pcvgz_openshift-marketplace(8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 16:59:47 crc kubenswrapper[4840]: E1209 16:59:47.183525 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-pcvgz" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" Dec 09 16:59:50 crc kubenswrapper[4840]: E1209 16:59:50.732499 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-rgpwz" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" Dec 09 16:59:50 crc kubenswrapper[4840]: E1209 16:59:50.733707 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-pcvgz" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.346035 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.346682 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dkdgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-78kws_openshift-marketplace(2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.347074 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.347148 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wv7q5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-br44r_openshift-marketplace(d12f03b6-5a9d-479e-9e73-2f2476161d97): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.348759 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-78kws" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" Dec 09 16:59:53 crc kubenswrapper[4840]: E1209 16:59:53.348842 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-br44r" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" Dec 09 16:59:53 crc kubenswrapper[4840]: I1209 16:59:53.736798 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-hc4xq"] Dec 09 16:59:53 crc kubenswrapper[4840]: I1209 16:59:53.751690 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9zr2s" Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.361678 4840 generic.go:334] "Generic (PLEG): container finished" podID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerID="e16ca5aed0211e6da82ab5ff1048ab4c26719d79d045152f62298b0cf5e38521" exitCode=0 Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.361763 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerDied","Data":"e16ca5aed0211e6da82ab5ff1048ab4c26719d79d045152f62298b0cf5e38521"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.366052 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" 
event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerStarted","Data":"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.368515 4840 generic.go:334] "Generic (PLEG): container finished" podID="aa22f122-455f-42d6-9ca2-f00291ab637a" containerID="8e669137561b871d74b87ece86e7422981a02a6784e22ea3719000603c050cb3" exitCode=0 Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.368629 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"aa22f122-455f-42d6-9ca2-f00291ab637a","Type":"ContainerDied","Data":"8e669137561b871d74b87ece86e7422981a02a6784e22ea3719000603c050cb3"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.370902 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" event={"ID":"2099e918-a035-4659-8247-971e3e59c6ef","Type":"ContainerStarted","Data":"77093309dd74130ceca54719dceb20ead199b5dce9954acdb30f602af472f25f"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.370938 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" event={"ID":"2099e918-a035-4659-8247-971e3e59c6ef","Type":"ContainerStarted","Data":"770fb593bbb400276a747c2f12937c489fd269fea47ca6fb2deb10d5d1f768e0"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.370948 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-hc4xq" event={"ID":"2099e918-a035-4659-8247-971e3e59c6ef","Type":"ContainerStarted","Data":"6cd41e0d5bc7eeda54b27fa0502f7e4ed3adf14a22937698eb818616e67bf5c4"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.373859 4840 generic.go:334] "Generic (PLEG): container finished" podID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerID="3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389" exitCode=0 Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.373926 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerDied","Data":"3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389"} Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.383248 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a467268-207b-41e8-927d-8bb4ce05c367" containerID="60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b" exitCode=0 Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.383587 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerDied","Data":"60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b"} Dec 09 16:59:54 crc kubenswrapper[4840]: E1209 16:59:54.385082 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-br44r" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" Dec 09 16:59:54 crc kubenswrapper[4840]: E1209 16:59:54.385313 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/certified-operators-78kws" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" Dec 09 16:59:54 crc kubenswrapper[4840]: I1209 16:59:54.406670 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-hc4xq" podStartSLOduration=157.406647994 podStartE2EDuration="2m37.406647994s" podCreationTimestamp="2025-12-09 16:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:59:54.405053956 +0000 UTC m=+180.396164589" watchObservedRunningTime="2025-12-09 16:59:54.406647994 +0000 UTC m=+180.397758627" Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.397143 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerStarted","Data":"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6"} Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.399936 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerStarted","Data":"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125"} Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.404278 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerStarted","Data":"5f5bee2c6ead2ab0269a91a8afb601214d7c8adb637f3aff5a55cadd6b987a93"} Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.407196 4840 generic.go:334] "Generic (PLEG): container finished" podID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerID="5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1" exitCode=0 Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.407296 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerDied","Data":"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1"} Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.427235 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xpnvf" podStartSLOduration=2.586730828 podStartE2EDuration="35.427215177s" podCreationTimestamp="2025-12-09 16:59:20 +0000 UTC" firstStartedPulling="2025-12-09 16:59:22.019870775 +0000 UTC m=+148.010981408" lastFinishedPulling="2025-12-09 16:59:54.860355124 +0000 UTC m=+180.851465757" observedRunningTime="2025-12-09 16:59:55.422469013 +0000 UTC m=+181.413579676" watchObservedRunningTime="2025-12-09 16:59:55.427215177 +0000 UTC m=+181.418325830" Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.448091 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p5t7p" podStartSLOduration=2.462552569 podStartE2EDuration="35.448072582s" podCreationTimestamp="2025-12-09 16:59:20 +0000 UTC" firstStartedPulling="2025-12-09 16:59:22.022470044 +0000 UTC m=+148.013580677" lastFinishedPulling="2025-12-09 16:59:55.007990047 +0000 UTC m=+180.999100690" observedRunningTime="2025-12-09 16:59:55.444789752 +0000 UTC m=+181.435900385" watchObservedRunningTime="2025-12-09 16:59:55.448072582 +0000 UTC m=+181.439183225" Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.473000 4840 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xjwsx" podStartSLOduration=3.7749581819999998 podStartE2EDuration="34.47298601s" podCreationTimestamp="2025-12-09 16:59:21 +0000 UTC" firstStartedPulling="2025-12-09 16:59:24.112906151 +0000 UTC m=+150.104016784" lastFinishedPulling="2025-12-09 16:59:54.810933949 +0000 UTC m=+180.802044612" observedRunningTime="2025-12-09 16:59:55.470925448 +0000 UTC m=+181.462036081" watchObservedRunningTime="2025-12-09 16:59:55.47298601 +0000 UTC m=+181.464096643" Dec 09 16:59:55 crc kubenswrapper[4840]: I1209 16:59:55.916468 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.064328 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access\") pod \"aa22f122-455f-42d6-9ca2-f00291ab637a\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.064446 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir\") pod \"aa22f122-455f-42d6-9ca2-f00291ab637a\" (UID: \"aa22f122-455f-42d6-9ca2-f00291ab637a\") " Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.064489 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "aa22f122-455f-42d6-9ca2-f00291ab637a" (UID: "aa22f122-455f-42d6-9ca2-f00291ab637a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.064693 4840 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa22f122-455f-42d6-9ca2-f00291ab637a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.073132 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "aa22f122-455f-42d6-9ca2-f00291ab637a" (UID: "aa22f122-455f-42d6-9ca2-f00291ab637a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.165711 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa22f122-455f-42d6-9ca2-f00291ab637a-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.414019 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"aa22f122-455f-42d6-9ca2-f00291ab637a","Type":"ContainerDied","Data":"e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8"} Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.414302 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7643f4f119504af55c77bf7ddec8283e7f1c6e360faaae3e925568191d887c8" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.414101 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.417189 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerStarted","Data":"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab"} Dec 09 16:59:56 crc kubenswrapper[4840]: I1209 16:59:56.436943 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w2jxv" podStartSLOduration=2.749360475 podStartE2EDuration="35.4369254s" podCreationTimestamp="2025-12-09 16:59:21 +0000 UTC" firstStartedPulling="2025-12-09 16:59:23.087662185 +0000 UTC m=+149.078772818" lastFinishedPulling="2025-12-09 16:59:55.77522711 +0000 UTC m=+181.766337743" observedRunningTime="2025-12-09 16:59:56.4359262 +0000 UTC m=+182.427036833" watchObservedRunningTime="2025-12-09 16:59:56.4369254 +0000 UTC m=+182.428036033" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.131038 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j"] Dec 09 17:00:00 crc kubenswrapper[4840]: E1209 17:00:00.131304 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa22f122-455f-42d6-9ca2-f00291ab637a" containerName="pruner" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.131319 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa22f122-455f-42d6-9ca2-f00291ab637a" containerName="pruner" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.131446 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa22f122-455f-42d6-9ca2-f00291ab637a" containerName="pruner" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.131883 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.133945 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.134224 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.141872 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j"] Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.320468 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gktnt\" (UniqueName: \"kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.320914 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.321109 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.419699 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.420289 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.421929 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gktnt\" (UniqueName: \"kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.422062 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.422098 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume\") pod \"collect-profiles-29421660-sm56j\" 
(UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.422945 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.432835 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.446452 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gktnt\" (UniqueName: \"kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt\") pod \"collect-profiles-29421660-sm56j\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.448477 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.522500 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.862506 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j"] Dec 09 17:00:00 crc kubenswrapper[4840]: W1209 17:00:00.870759 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1c95712_598d_415d_b080_c5b7430d6186.slice/crio-3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca WatchSource:0}: Error finding container 3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca: Status 404 returned error can't find the container with id 3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.883796 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.884049 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:00 crc kubenswrapper[4840]: I1209 17:00:00.948323 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.445753 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" event={"ID":"e1c95712-598d-415d-b080-c5b7430d6186","Type":"ContainerStarted","Data":"3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca"} Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.490481 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.495515 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.860298 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.860348 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:00:01 crc kubenswrapper[4840]: I1209 17:00:01.899469 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.000976 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.056455 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.057692 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.063129 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.063674 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.075336 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.080942 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.244543 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.244838 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.289193 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.289241 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.335222 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.346199 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.346237 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.346315 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.366749 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.383071 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.459602 4840 generic.go:334] "Generic (PLEG): container finished" podID="e1c95712-598d-415d-b080-c5b7430d6186" containerID="1376d1d11562b55216e698ca9e6169ce8d0d5779c1da116b8f6d17fed8c9e946" exitCode=0 Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.460125 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" event={"ID":"e1c95712-598d-415d-b080-c5b7430d6186","Type":"ContainerDied","Data":"1376d1d11562b55216e698ca9e6169ce8d0d5779c1da116b8f6d17fed8c9e946"} Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.505930 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.513884 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xjwsx" Dec 09 17:00:02 crc kubenswrapper[4840]: I1209 17:00:02.817807 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.389329 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"] Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.466185 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a94a9d5f-9e10-4f8c-a992-abfb95761773","Type":"ContainerStarted","Data":"b85a148500e7e55d6edacb108d46f25ef2751723f254db6c9122369b2c6fd4fd"} Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.785663 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.908361 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume\") pod \"e1c95712-598d-415d-b080-c5b7430d6186\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.908433 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gktnt\" (UniqueName: \"kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt\") pod \"e1c95712-598d-415d-b080-c5b7430d6186\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.908563 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume\") pod \"e1c95712-598d-415d-b080-c5b7430d6186\" (UID: \"e1c95712-598d-415d-b080-c5b7430d6186\") " Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.909160 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume" (OuterVolumeSpecName: "config-volume") pod "e1c95712-598d-415d-b080-c5b7430d6186" (UID: "e1c95712-598d-415d-b080-c5b7430d6186"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.914171 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e1c95712-598d-415d-b080-c5b7430d6186" (UID: "e1c95712-598d-415d-b080-c5b7430d6186"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:00:03 crc kubenswrapper[4840]: I1209 17:00:03.915602 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt" (OuterVolumeSpecName: "kube-api-access-gktnt") pod "e1c95712-598d-415d-b080-c5b7430d6186" (UID: "e1c95712-598d-415d-b080-c5b7430d6186"). InnerVolumeSpecName "kube-api-access-gktnt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.009821 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1c95712-598d-415d-b080-c5b7430d6186-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.009895 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1c95712-598d-415d-b080-c5b7430d6186-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.009908 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gktnt\" (UniqueName: \"kubernetes.io/projected/e1c95712-598d-415d-b080-c5b7430d6186-kube-api-access-gktnt\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.036071 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.036130 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.472363 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.472358 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j" event={"ID":"e1c95712-598d-415d-b080-c5b7430d6186","Type":"ContainerDied","Data":"3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca"} Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.472499 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3076f9acb81b9accfe9de87d43872c8dcc05e54719e506a489e8c6712bf147ca" Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.473638 4840 generic.go:334] "Generic (PLEG): container finished" podID="a94a9d5f-9e10-4f8c-a992-abfb95761773" containerID="17d4af3b84fe5c7dc27b2fb96f45c01fe1a9cc9d4e63920c0185d51c29ac0625" exitCode=0 Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.473877 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p5t7p" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="registry-server" containerID="cri-o://7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125" gracePeriod=2 Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.474104 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a94a9d5f-9e10-4f8c-a992-abfb95761773","Type":"ContainerDied","Data":"17d4af3b84fe5c7dc27b2fb96f45c01fe1a9cc9d4e63920c0185d51c29ac0625"} Dec 09 17:00:04 crc kubenswrapper[4840]: I1209 17:00:04.899326 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.021915 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content\") pod \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.021978 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities\") pod \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.022025 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d88z5\" (UniqueName: \"kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5\") pod \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\" (UID: \"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2\") " Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.023162 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities" (OuterVolumeSpecName: "utilities") pod "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" (UID: "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.030105 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5" (OuterVolumeSpecName: "kube-api-access-d88z5") pod "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" (UID: "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2"). InnerVolumeSpecName "kube-api-access-d88z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.045838 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" (UID: "dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.123294 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.123328 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.123337 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d88z5\" (UniqueName: \"kubernetes.io/projected/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2-kube-api-access-d88z5\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.481719 4840 generic.go:334] "Generic (PLEG): container finished" podID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerID="7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125" exitCode=0 Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.481786 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p5t7p" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.481803 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerDied","Data":"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125"} Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.482209 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p5t7p" event={"ID":"dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2","Type":"ContainerDied","Data":"1e840a821d249c7303b2a3dc3f3081075f45de29597e409686ca2741a0c02162"} Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.482283 4840 scope.go:117] "RemoveContainer" containerID="7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.485891 4840 generic.go:334] "Generic (PLEG): container finished" podID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerID="3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c" exitCode=0 Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.486063 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerDied","Data":"3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c"} Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.508837 4840 scope.go:117] "RemoveContainer" containerID="3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.522812 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"] Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.527309 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p5t7p"] Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.536566 4840 scope.go:117] "RemoveContainer" containerID="5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.553361 4840 scope.go:117] "RemoveContainer" 
containerID="7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125" Dec 09 17:00:05 crc kubenswrapper[4840]: E1209 17:00:05.557145 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125\": container with ID starting with 7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125 not found: ID does not exist" containerID="7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.557206 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125"} err="failed to get container status \"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125\": rpc error: code = NotFound desc = could not find container \"7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125\": container with ID starting with 7bba9962026e3e3348623a89481ca9eb521c145cb767f8f4fb78ab70d51cb125 not found: ID does not exist" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.557303 4840 scope.go:117] "RemoveContainer" containerID="3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389" Dec 09 17:00:05 crc kubenswrapper[4840]: E1209 17:00:05.557616 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389\": container with ID starting with 3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389 not found: ID does not exist" containerID="3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.557638 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389"} err="failed to get container status \"3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389\": rpc error: code = NotFound desc = could not find container \"3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389\": container with ID starting with 3dd7f9e52bd4131da7ee43fb8d795163e5d10089be16d83b3110f11a1bd14389 not found: ID does not exist" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.557653 4840 scope.go:117] "RemoveContainer" containerID="5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040" Dec 09 17:00:05 crc kubenswrapper[4840]: E1209 17:00:05.557856 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040\": container with ID starting with 5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040 not found: ID does not exist" containerID="5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040" Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.557874 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040"} err="failed to get container status \"5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040\": rpc error: code = NotFound desc = could not find container \"5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040\": container with ID starting with 
5034a160e40e1d3d09327f128a29be8e82f2de86ffba145117aee8dc14f2d040 not found: ID does not exist"
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.791324 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"]
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.791579 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xjwsx" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="registry-server" containerID="cri-o://5f5bee2c6ead2ab0269a91a8afb601214d7c8adb637f3aff5a55cadd6b987a93" gracePeriod=2
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.794401 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.933569 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir\") pod \"a94a9d5f-9e10-4f8c-a992-abfb95761773\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") "
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.933660 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access\") pod \"a94a9d5f-9e10-4f8c-a992-abfb95761773\" (UID: \"a94a9d5f-9e10-4f8c-a992-abfb95761773\") "
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.934899 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a94a9d5f-9e10-4f8c-a992-abfb95761773" (UID: "a94a9d5f-9e10-4f8c-a992-abfb95761773"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 17:00:05 crc kubenswrapper[4840]: I1209 17:00:05.940891 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a94a9d5f-9e10-4f8c-a992-abfb95761773" (UID: "a94a9d5f-9e10-4f8c-a992-abfb95761773"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.037106 4840 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a94a9d5f-9e10-4f8c-a992-abfb95761773-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.037160 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a94a9d5f-9e10-4f8c-a992-abfb95761773-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.494233 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a94a9d5f-9e10-4f8c-a992-abfb95761773","Type":"ContainerDied","Data":"b85a148500e7e55d6edacb108d46f25ef2751723f254db6c9122369b2c6fd4fd"}
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.494276 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b85a148500e7e55d6edacb108d46f25ef2751723f254db6c9122369b2c6fd4fd"
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.494444 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 09 17:00:06 crc kubenswrapper[4840]: I1209 17:00:06.619050 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" path="/var/lib/kubelet/pods/dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2/volumes"
Dec 09 17:00:07 crc kubenswrapper[4840]: I1209 17:00:07.504852 4840 generic.go:334] "Generic (PLEG): container finished" podID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerID="5f5bee2c6ead2ab0269a91a8afb601214d7c8adb637f3aff5a55cadd6b987a93" exitCode=0
Dec 09 17:00:07 crc kubenswrapper[4840]: I1209 17:00:07.505103 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerDied","Data":"5f5bee2c6ead2ab0269a91a8afb601214d7c8adb637f3aff5a55cadd6b987a93"}
Dec 09 17:00:07 crc kubenswrapper[4840]: I1209 17:00:07.964632 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xjwsx"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.064482 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities\") pod \"7258ff5e-2dee-4812-a494-27a3aa0940a8\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") "
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.064544 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content\") pod \"7258ff5e-2dee-4812-a494-27a3aa0940a8\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") "
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.064588 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm8st\" (UniqueName: \"kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st\") pod \"7258ff5e-2dee-4812-a494-27a3aa0940a8\" (UID: \"7258ff5e-2dee-4812-a494-27a3aa0940a8\") "
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.065539 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities" (OuterVolumeSpecName: "utilities") pod "7258ff5e-2dee-4812-a494-27a3aa0940a8" (UID: "7258ff5e-2dee-4812-a494-27a3aa0940a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.071049 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st" (OuterVolumeSpecName: "kube-api-access-nm8st") pod "7258ff5e-2dee-4812-a494-27a3aa0940a8" (UID: "7258ff5e-2dee-4812-a494-27a3aa0940a8"). InnerVolumeSpecName "kube-api-access-nm8st". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.166265 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm8st\" (UniqueName: \"kubernetes.io/projected/7258ff5e-2dee-4812-a494-27a3aa0940a8-kube-api-access-nm8st\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.166328 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.194926 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7258ff5e-2dee-4812-a494-27a3aa0940a8" (UID: "7258ff5e-2dee-4812-a494-27a3aa0940a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.267598 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7258ff5e-2dee-4812-a494-27a3aa0940a8-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.524779 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjwsx" event={"ID":"7258ff5e-2dee-4812-a494-27a3aa0940a8","Type":"ContainerDied","Data":"3b311f2701ba2a91e586dd89e60c2cf24077fbe84ea14435e70afce2e06ee4d6"}
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.524829 4840 scope.go:117] "RemoveContainer" containerID="5f5bee2c6ead2ab0269a91a8afb601214d7c8adb637f3aff5a55cadd6b987a93"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.524880 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xjwsx"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.560433 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"]
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.563706 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xjwsx"]
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.596310 4840 scope.go:117] "RemoveContainer" containerID="e16ca5aed0211e6da82ab5ff1048ab4c26719d79d045152f62298b0cf5e38521"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.620592 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" path="/var/lib/kubelet/pods/7258ff5e-2dee-4812-a494-27a3aa0940a8/volumes"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.624348 4840 scope.go:117] "RemoveContainer" containerID="4926fc9a5a23f2997cda6469f5adf396fbe00ee6bb112db086239d1331fd7a37"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846031 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846225 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="extract-content"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846237 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="extract-content"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846248 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="extract-utilities"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846253 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="extract-utilities"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846263 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="extract-utilities"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846268 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="extract-utilities"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846277 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="registry-server"
Dec 09 17:00:08 crc
kubenswrapper[4840]: I1209 17:00:08.846283 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="registry-server"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846295 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="registry-server"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846302 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="registry-server"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846309 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a94a9d5f-9e10-4f8c-a992-abfb95761773" containerName="pruner"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846315 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a94a9d5f-9e10-4f8c-a992-abfb95761773" containerName="pruner"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846322 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="extract-content"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846343 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="extract-content"
Dec 09 17:00:08 crc kubenswrapper[4840]: E1209 17:00:08.846353 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1c95712-598d-415d-b080-c5b7430d6186" containerName="collect-profiles"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846358 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1c95712-598d-415d-b080-c5b7430d6186" containerName="collect-profiles"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846436 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1c95712-598d-415d-b080-c5b7430d6186" containerName="collect-profiles"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846446 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7258ff5e-2dee-4812-a494-27a3aa0940a8" containerName="registry-server"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846453 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a94a9d5f-9e10-4f8c-a992-abfb95761773" containerName="pruner"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846464 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd2f80a2-6b66-48e1-8d5d-5fd35c682ae2" containerName="registry-server"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.846806 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.848915 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.849597 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.858779 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.874693 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.874749 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.874826 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.976318 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.976619 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.976642 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.976411 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.976791 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:08 crc kubenswrapper[4840]: I1209 17:00:08.997922 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access\") pod \"installer-9-crc\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.169003 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.534277 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerStarted","Data":"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e"}
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.538636 4840 generic.go:334] "Generic (PLEG): container finished" podID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerID="c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9" exitCode=0
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.538733 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerDied","Data":"c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9"}
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.553948 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rgpwz" podStartSLOduration=2.735243246 podStartE2EDuration="51.553919352s" podCreationTimestamp="2025-12-09 16:59:18 +0000 UTC" firstStartedPulling="2025-12-09 16:59:19.88741687 +0000 UTC m=+145.878527503" lastFinishedPulling="2025-12-09 17:00:08.706092966 +0000 UTC m=+194.697203609" observedRunningTime="2025-12-09 17:00:09.551863819 +0000 UTC m=+195.542974442" watchObservedRunningTime="2025-12-09 17:00:09.553919352 +0000 UTC m=+195.545029985"
Dec 09 17:00:09 crc kubenswrapper[4840]: I1209 17:00:09.637421 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 09 17:00:13 crc kubenswrapper[4840]: W1209 17:00:13.056130 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod9096f725_037d_47fd_a7f8_61a896113bc5.slice/crio-ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa WatchSource:0}: Error finding container ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa: Status 404 returned error can't find the container with id ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa
Dec 09 17:00:13 crc kubenswrapper[4840]: I1209 17:00:13.569041 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9096f725-037d-47fd-a7f8-61a896113bc5","Type":"ContainerStarted","Data":"ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa"}
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.585280 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9096f725-037d-47fd-a7f8-61a896113bc5","Type":"ContainerStarted","Data":"ecc3a974d85638f2684d0d820c75d70857b6c93097a516ce6f367e40472311f6"}
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.589254 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerStarted","Data":"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"}
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.591788 4840 generic.go:334] "Generic (PLEG): container finished" podID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerID="768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d" exitCode=0
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.591829 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerDied","Data":"768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d"}
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.596388 4840 generic.go:334] "Generic (PLEG): container finished" podID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerID="6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3" exitCode=0
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.596429 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerDied","Data":"6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3"}
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.612993 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=7.612939333 podStartE2EDuration="7.612939333s" podCreationTimestamp="2025-12-09 17:00:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:00:15.607904503 +0000 UTC m=+201.599015176" watchObservedRunningTime="2025-12-09 17:00:15.612939333 +0000 UTC m=+201.604050006"
Dec 09 17:00:15 crc kubenswrapper[4840]: I1209 17:00:15.679423 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pcvgz" podStartSLOduration=3.650643994 podStartE2EDuration="57.679408911s" podCreationTimestamp="2025-12-09 16:59:18 +0000 UTC" firstStartedPulling="2025-12-09 16:59:20.956480508 +0000 UTC m=+146.947591141" lastFinishedPulling="2025-12-09 17:00:14.985245425 +0000 UTC m=+200.976356058" observedRunningTime="2025-12-09 17:00:15.676276717 +0000 UTC m=+201.667387350" watchObservedRunningTime="2025-12-09 17:00:15.679408911 +0000 UTC m=+201.670519534"
Dec 09 17:00:16 crc kubenswrapper[4840]: I1209 17:00:16.602861 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerStarted","Data":"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02"}
Dec 09 17:00:16 crc kubenswrapper[4840]: I1209 17:00:16.621129 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-br44r" podStartSLOduration=3.545294359 podStartE2EDuration="58.621107632s" podCreationTimestamp="2025-12-09 16:59:18 +0000 UTC" firstStartedPulling="2025-12-09 16:59:20.956691564 +0000 UTC m=+146.947802197" lastFinishedPulling="2025-12-09 17:00:16.032504827 +0000 UTC m=+202.023615470" observedRunningTime="2025-12-09 17:00:16.618723301 +0000 UTC m=+202.609833944" watchObservedRunningTime="2025-12-09 17:00:16.621107632 +0000 UTC
m=+202.612218265"
Dec 09 17:00:17 crc kubenswrapper[4840]: I1209 17:00:17.610231 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerStarted","Data":"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958"}
Dec 09 17:00:17 crc kubenswrapper[4840]: I1209 17:00:17.640068 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-78kws" podStartSLOduration=3.760434985 podStartE2EDuration="59.640032392s" podCreationTimestamp="2025-12-09 16:59:18 +0000 UTC" firstStartedPulling="2025-12-09 16:59:20.948209716 +0000 UTC m=+146.939320339" lastFinishedPulling="2025-12-09 17:00:16.827807103 +0000 UTC m=+202.818917746" observedRunningTime="2025-12-09 17:00:17.63661788 +0000 UTC m=+203.627728523" watchObservedRunningTime="2025-12-09 17:00:17.640032392 +0000 UTC m=+203.631143025"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.495121 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-br44r"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.495229 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-br44r"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.554598 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-br44r"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.647013 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.647056 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.692131 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.859685 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-78kws"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.860115 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-78kws"
Dec 09 17:00:18 crc kubenswrapper[4840]: I1209 17:00:18.904793 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-78kws"
Dec 09 17:00:19 crc kubenswrapper[4840]: I1209 17:00:19.079416 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:19 crc kubenswrapper[4840]: I1209 17:00:19.079856 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:19 crc kubenswrapper[4840]: I1209 17:00:19.141874 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:19 crc kubenswrapper[4840]: I1209 17:00:19.660958 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rgpwz"
Dec 09 17:00:20 crc kubenswrapper[4840]: I1209 17:00:20.683980 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:22 crc kubenswrapper[4840]: I1209 17:00:22.194425 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 17:00:22 crc kubenswrapper[4840]: I1209 17:00:22.637639 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pcvgz" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="registry-server" containerID="cri-o://11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76" gracePeriod=2
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.559790 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.647527 4840 generic.go:334] "Generic (PLEG): container finished" podID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerID="11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76" exitCode=0
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.647605 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerDied","Data":"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"}
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.647664 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcvgz" event={"ID":"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7","Type":"ContainerDied","Data":"bf4cf7db369f26800bc5d8a3f6f9574989722631093dd532faca1027601c80dd"}
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.647702 4840 scope.go:117] "RemoveContainer" containerID="11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.647718 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcvgz"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.669442 4840 scope.go:117] "RemoveContainer" containerID="c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.690653 4840 scope.go:117] "RemoveContainer" containerID="e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.713054 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content\") pod \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") "
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.713094 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities\") pod \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") "
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.713198 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8cmr\" (UniqueName: \"kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr\") pod \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\" (UID: \"8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7\") "
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.714887 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities" (OuterVolumeSpecName: "utilities") pod "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" (UID: "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.718852 4840 scope.go:117] "RemoveContainer" containerID="11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.719227 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr" (OuterVolumeSpecName: "kube-api-access-h8cmr") pod "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" (UID: "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7"). InnerVolumeSpecName "kube-api-access-h8cmr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:00:23 crc kubenswrapper[4840]: E1209 17:00:23.719361 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76\": container with ID starting with 11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76 not found: ID does not exist" containerID="11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.719401 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76"} err="failed to get container status \"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76\": rpc error: code = NotFound desc = could not find container \"11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76\": container with ID starting with 11ea49f44a361bf935c38488ce2b15df212cb5d59314bcdcd85d0545e2b73e76 not found: ID does not exist"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.719427 4840 scope.go:117] "RemoveContainer" containerID="c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9"
Dec 09 17:00:23 crc kubenswrapper[4840]: E1209 17:00:23.719754 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9\": container with ID starting with c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9 not found: ID does not exist" containerID="c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.719837 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9"} err="failed to get container status \"c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9\": rpc error: code = NotFound desc = could not find container \"c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9\": container with ID starting with c7e8362b812f32b3cc2478ff9c22ba590223728ed59b6eda33d7f5ff987d80e9 not found: ID does not exist"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.719872 4840 scope.go:117] "RemoveContainer" containerID="e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137"
Dec 09 17:00:23 crc kubenswrapper[4840]: E1209 17:00:23.720169 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137\": container with ID starting with e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137 not found: ID does not exist" containerID="e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.720195 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137"} err="failed to get container status \"e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137\": rpc error: code = NotFound desc = could not find container \"e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137\": container with ID starting with
e4a1cc3d6bf4176bb8d03076c9619ab068d9f21d8f4ae270f96a407529077137 not found: ID does not exist"
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.772194 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" (UID: "8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.814739 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.814774 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:23 crc kubenswrapper[4840]: I1209 17:00:23.814789 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8cmr\" (UniqueName: \"kubernetes.io/projected/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7-kube-api-access-h8cmr\") on node \"crc\" DevicePath \"\""
Dec 09 17:00:24 crc kubenswrapper[4840]: I1209 17:00:24.002357 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 17:00:24 crc kubenswrapper[4840]: I1209 17:00:24.008231 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pcvgz"]
Dec 09 17:00:24 crc kubenswrapper[4840]: I1209 17:00:24.620291 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" path="/var/lib/kubelet/pods/8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7/volumes"
Dec 09 17:00:27 crc kubenswrapper[4840]: I1209 17:00:27.103204 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerName="oauth-openshift" containerID="cri-o://ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c" gracePeriod=15
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.543603 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-br44r"
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.625283 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8"
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683731 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683785 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683813 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683845 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683866 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683887 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683914 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683940 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.683982 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684014 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns4wt\" (UniqueName: \"kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684035 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684061 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684094 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684118 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session\") pod \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\" (UID: \"63ab647f-02b8-49c7-8f8b-622a0fcd73bf\") "
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.684998 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.685012 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.685054 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.685092 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.685542 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.689396 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.689780 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.690691 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.690714 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.692770 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.693079 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt" (OuterVolumeSpecName: "kube-api-access-ns4wt") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "kube-api-access-ns4wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.694897 4840 generic.go:334] "Generic (PLEG): container finished" podID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerID="ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c" exitCode=0 Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.695018 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" event={"ID":"63ab647f-02b8-49c7-8f8b-622a0fcd73bf","Type":"ContainerDied","Data":"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c"} Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.695122 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" event={"ID":"63ab647f-02b8-49c7-8f8b-622a0fcd73bf","Type":"ContainerDied","Data":"a26154a00eb30705931a9ebbcf3a3f4eae2dadd43829cef4f696685eadd6b4b9"} Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.695217 4840 scope.go:117] "RemoveContainer" containerID="ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.695529 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pr2p8" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.697221 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.698523 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.699589 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "63ab647f-02b8-49c7-8f8b-622a0fcd73bf" (UID: "63ab647f-02b8-49c7-8f8b-622a0fcd73bf"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.729609 4840 scope.go:117] "RemoveContainer" containerID="ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c" Dec 09 17:00:28 crc kubenswrapper[4840]: E1209 17:00:28.730546 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c\": container with ID starting with ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c not found: ID does not exist" containerID="ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.730578 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c"} err="failed to get container status \"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c\": rpc error: code = NotFound desc = could not find container \"ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c\": container with ID starting with ff8eb489f59839202cab6b48959f5241d5b63cbb1dc11ccf9c18bf9bc3ea970c not found: ID does not exist" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784838 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784868 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784879 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784909 4840 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784920 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns4wt\" (UniqueName: \"kubernetes.io/projected/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-kube-api-access-ns4wt\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784931 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784940 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784949 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784958 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.784992 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.785001 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.785009 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.785019 4840 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.785027 4840 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/63ab647f-02b8-49c7-8f8b-622a0fcd73bf-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.904082 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-78kws" Dec 09 17:00:28 crc kubenswrapper[4840]: I1209 17:00:28.947799 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-78kws"] Dec 09 17:00:29 crc kubenswrapper[4840]: I1209 17:00:29.034875 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 17:00:29 crc kubenswrapper[4840]: I1209 17:00:29.039526 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pr2p8"] Dec 09 17:00:29 crc kubenswrapper[4840]: I1209 17:00:29.702345 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-78kws" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="registry-server" containerID="cri-o://5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958" gracePeriod=2 Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.115730 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-78kws" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.303485 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content\") pod \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.303691 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities\") pod \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.303777 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkdgq\" (UniqueName: \"kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq\") pod \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\" (UID: \"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f\") " Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.305695 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities" (OuterVolumeSpecName: "utilities") pod "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" (UID: "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.309088 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq" (OuterVolumeSpecName: "kube-api-access-dkdgq") pod "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" (UID: "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f"). InnerVolumeSpecName "kube-api-access-dkdgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.351452 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" (UID: "2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.405271 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.405314 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkdgq\" (UniqueName: \"kubernetes.io/projected/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-kube-api-access-dkdgq\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.405329 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.616420 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" path="/var/lib/kubelet/pods/63ab647f-02b8-49c7-8f8b-622a0fcd73bf/volumes" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.711780 4840 generic.go:334] "Generic (PLEG): container finished" podID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerID="5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958" exitCode=0 Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.711821 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerDied","Data":"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958"} Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.711871 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78kws" event={"ID":"2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f","Type":"ContainerDied","Data":"86999334f9bd87b1f7c4086e3783d7ea8d4ddbda774abf4902a7259fd2e9f03a"} Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.711894 4840 scope.go:117] "RemoveContainer" containerID="5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.711898 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-78kws" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.732367 4840 scope.go:117] "RemoveContainer" containerID="6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.733109 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-78kws"] Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.736794 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-78kws"] Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.754291 4840 scope.go:117] "RemoveContainer" containerID="d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.775070 4840 scope.go:117] "RemoveContainer" containerID="5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958" Dec 09 17:00:30 crc kubenswrapper[4840]: E1209 17:00:30.775689 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958\": container with ID starting with 5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958 not found: ID does not exist" containerID="5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.775735 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958"} err="failed to get container status \"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958\": rpc error: code = NotFound desc = could not find container \"5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958\": container with ID starting with 5f9019e4973190faeb164282759371da9eef37612e71e6c73f790f9f9d31c958 not found: ID does not exist" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.775763 4840 scope.go:117] "RemoveContainer" containerID="6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3" Dec 09 17:00:30 crc kubenswrapper[4840]: E1209 17:00:30.776389 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3\": container with ID starting with 6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3 not found: ID does not exist" containerID="6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.776431 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3"} err="failed to get container status \"6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3\": rpc error: code = NotFound desc = could not find container \"6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3\": container with ID starting with 6093dc3c9db037e86ef92eb43b223acc8037c6340aaba41a19fb493c1e37e9b3 not found: ID does not exist" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.776462 4840 scope.go:117] "RemoveContainer" containerID="d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff" Dec 09 17:00:30 crc kubenswrapper[4840]: E1209 17:00:30.776927 4840 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff\": container with ID starting with d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff not found: ID does not exist" containerID="d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff" Dec 09 17:00:30 crc kubenswrapper[4840]: I1209 17:00:30.776953 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff"} err="failed to get container status \"d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff\": rpc error: code = NotFound desc = could not find container \"d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff\": container with ID starting with d44d2c88bb49fde2f54c77eca08aebc9a7acf1225f5bc2bb2b45f2e7a9e08eff not found: ID does not exist" Dec 09 17:00:32 crc kubenswrapper[4840]: I1209 17:00:32.619054 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" path="/var/lib/kubelet/pods/2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f/volumes" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.037081 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.037203 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.037279 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.038468 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.038591 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c" gracePeriod=600 Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.234945 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h"] Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.235780 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="extract-utilities" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.236050 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="extract-utilities" 
Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.236286 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerName="oauth-openshift" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.236462 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerName="oauth-openshift" Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.236656 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="extract-content" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.236864 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="extract-content" Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.237112 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.237288 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.237564 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="extract-content" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.237745 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="extract-content" Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.237925 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="extract-utilities" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.238155 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="extract-utilities" Dec 09 17:00:34 crc kubenswrapper[4840]: E1209 17:00:34.238325 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.238482 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.239008 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="63ab647f-02b8-49c7-8f8b-622a0fcd73bf" containerName="oauth-openshift" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.239207 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4f4dbe-dafb-445c-b41c-025bbeb0cb1f" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.239370 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a4a4ee4-58ad-4ec5-92e1-bfa895b28cb7" containerName="registry-server" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.240465 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.255635 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h"] Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.256265 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.257266 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.257290 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.257436 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.257679 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.259667 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.259668 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.260179 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.260480 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.260602 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.260509 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.260919 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.268259 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.269009 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.272905 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.356231 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " 
pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.356894 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357006 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357084 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357339 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357398 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357432 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357472 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzw7f\" (UniqueName: \"kubernetes.io/projected/83d5f692-253c-47e1-81da-6bd4cda1c6b2-kube-api-access-mzw7f\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357556 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-dir\") pod 
\"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357707 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357769 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357807 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357835 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.357881 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458505 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458575 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458619 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458678 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458731 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.458952 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459008 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459046 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459078 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459110 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459138 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzw7f\" 
(UniqueName: \"kubernetes.io/projected/83d5f692-253c-47e1-81da-6bd4cda1c6b2-kube-api-access-mzw7f\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459181 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-dir\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459227 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459250 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.459855 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-dir\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.460827 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.461814 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.462079 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.462602 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" 
(UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.465213 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.465744 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.466320 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.466567 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.468114 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.468247 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.468615 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.469237 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83d5f692-253c-47e1-81da-6bd4cda1c6b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: 
\"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.485683 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzw7f\" (UniqueName: \"kubernetes.io/projected/83d5f692-253c-47e1-81da-6bd4cda1c6b2-kube-api-access-mzw7f\") pod \"oauth-openshift-6fff5dcfd9-qd99h\" (UID: \"83d5f692-253c-47e1-81da-6bd4cda1c6b2\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.627649 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.742769 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c" exitCode=0 Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.742944 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c"} Dec 09 17:00:34 crc kubenswrapper[4840]: I1209 17:00:34.743123 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786"} Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.023584 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h"] Dec 09 17:00:35 crc kubenswrapper[4840]: W1209 17:00:35.030934 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83d5f692_253c_47e1_81da_6bd4cda1c6b2.slice/crio-3a2ed2177096e0a464351b94fce4b0dbf41aae49f5fd51c89ae02ee5c5dd12ae WatchSource:0}: Error finding container 3a2ed2177096e0a464351b94fce4b0dbf41aae49f5fd51c89ae02ee5c5dd12ae: Status 404 returned error can't find the container with id 3a2ed2177096e0a464351b94fce4b0dbf41aae49f5fd51c89ae02ee5c5dd12ae Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.751677 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" event={"ID":"83d5f692-253c-47e1-81da-6bd4cda1c6b2","Type":"ContainerStarted","Data":"62f2c6c45112360a38a380075b51a887cface55db8ec4ceee6948134b5e87c95"} Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.752059 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" event={"ID":"83d5f692-253c-47e1-81da-6bd4cda1c6b2","Type":"ContainerStarted","Data":"3a2ed2177096e0a464351b94fce4b0dbf41aae49f5fd51c89ae02ee5c5dd12ae"} Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.752085 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.773413 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" podStartSLOduration=33.773395681 podStartE2EDuration="33.773395681s" podCreationTimestamp="2025-12-09 17:00:02 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:00:35.769943629 +0000 UTC m=+221.761054362" watchObservedRunningTime="2025-12-09 17:00:35.773395681 +0000 UTC m=+221.764506324" Dec 09 17:00:35 crc kubenswrapper[4840]: I1209 17:00:35.843173 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-qd99h" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.579554 4840 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.581793 4840 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.584166 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.585664 4840 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.586462 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265" gracePeriod=15 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.586745 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e" gracePeriod=15 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.586844 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093" gracePeriod=15 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.587128 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d" gracePeriod=15 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.592411 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a" gracePeriod=15 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.603751 4840 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.607440 4840 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.607786 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.607907 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.608660 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.608780 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.608857 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.609250 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.609355 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.609466 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.609560 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.609636 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.609713 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.609802 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.609881 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610144 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610251 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610372 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610455 4840 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610544 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.610633 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.612635 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.612774 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.613009 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.613107 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.613194 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.666908 4840 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714240 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714305 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714339 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714379 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714409 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714500 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714542 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714567 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714812 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.714867 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.715083 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.715153 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.815810 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.815875 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.815948 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.816005 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.816030 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.816099 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.850246 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.851678 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.852312 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e" exitCode=0 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.852339 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093" exitCode=0 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.852351 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a" exitCode=0 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.852361 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d" exitCode=2 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.852362 4840 scope.go:117] "RemoveContainer" containerID="efb497808d1a4e93c45331bb1f349b6d54436b3dbe9562979457852a5ed0cef8" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.853805 4840 generic.go:334] "Generic (PLEG): container finished" podID="9096f725-037d-47fd-a7f8-61a896113bc5" containerID="ecc3a974d85638f2684d0d820c75d70857b6c93097a516ce6f367e40472311f6" exitCode=0 Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.853833 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9096f725-037d-47fd-a7f8-61a896113bc5","Type":"ContainerDied","Data":"ecc3a974d85638f2684d0d820c75d70857b6c93097a516ce6f367e40472311f6"} Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.854421 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:52 crc kubenswrapper[4840]: I1209 17:00:52.968113 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:52 crc kubenswrapper[4840]: W1209 17:00:52.992762 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-dd31f0597e264c96dd5c5d9bd580dc8dc15687fbf0440e0903412d35b13df69d WatchSource:0}: Error finding container dd31f0597e264c96dd5c5d9bd580dc8dc15687fbf0440e0903412d35b13df69d: Status 404 returned error can't find the container with id dd31f0597e264c96dd5c5d9bd580dc8dc15687fbf0440e0903412d35b13df69d Dec 09 17:00:52 crc kubenswrapper[4840]: E1209 17:00:52.996605 4840 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.204:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187f9aae72ef0c9d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,LastTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 09 17:00:53 crc kubenswrapper[4840]: E1209 17:00:53.452908 4840 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.204:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187f9aae72ef0c9d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,LastTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 09 17:00:53 crc kubenswrapper[4840]: I1209 17:00:53.863074 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2"} Dec 09 17:00:53 crc kubenswrapper[4840]: I1209 17:00:53.865674 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dd31f0597e264c96dd5c5d9bd580dc8dc15687fbf0440e0903412d35b13df69d"} Dec 09 17:00:53 crc kubenswrapper[4840]: I1209 17:00:53.866370 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 17:00:53 crc kubenswrapper[4840]: E1209 17:00:53.866392 4840 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:53 crc kubenswrapper[4840]: I1209 17:00:53.866854 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.147184 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.147795 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.333856 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access\") pod \"9096f725-037d-47fd-a7f8-61a896113bc5\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334118 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock\") pod \"9096f725-037d-47fd-a7f8-61a896113bc5\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334237 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir\") pod \"9096f725-037d-47fd-a7f8-61a896113bc5\" (UID: \"9096f725-037d-47fd-a7f8-61a896113bc5\") " Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334316 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock" (OuterVolumeSpecName: "var-lock") pod "9096f725-037d-47fd-a7f8-61a896113bc5" (UID: "9096f725-037d-47fd-a7f8-61a896113bc5"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334442 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9096f725-037d-47fd-a7f8-61a896113bc5" (UID: "9096f725-037d-47fd-a7f8-61a896113bc5"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334682 4840 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.334706 4840 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9096f725-037d-47fd-a7f8-61a896113bc5-var-lock\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.341344 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9096f725-037d-47fd-a7f8-61a896113bc5" (UID: "9096f725-037d-47fd-a7f8-61a896113bc5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.435717 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9096f725-037d-47fd-a7f8-61a896113bc5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.611355 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.875053 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9096f725-037d-47fd-a7f8-61a896113bc5","Type":"ContainerDied","Data":"ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa"} Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.875325 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae81920998231f6aa9e705267bec6b4ec2e9e1844dfe35450c80c22712462daa" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.875079 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 17:00:54 crc kubenswrapper[4840]: E1209 17:00:54.875643 4840 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.971080 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.974880 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.975572 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.976131 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:54 crc kubenswrapper[4840]: I1209 17:00:54.976778 4840 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145489 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145549 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145603 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145594 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145674 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145767 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.145945 4840 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.146047 4840 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.146062 4840 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:00:55 crc kubenswrapper[4840]: E1209 17:00:55.652715 4840 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" volumeName="registry-storage" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.886333 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.888579 4840 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265" exitCode=0 Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.888642 4840 scope.go:117] "RemoveContainer" containerID="fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.888802 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.907562 4840 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.908617 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.912952 4840 scope.go:117] "RemoveContainer" containerID="36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.928530 4840 scope.go:117] "RemoveContainer" containerID="ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.945625 4840 scope.go:117] "RemoveContainer" containerID="4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.965157 4840 scope.go:117] "RemoveContainer" containerID="9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265" Dec 09 17:00:55 crc kubenswrapper[4840]: I1209 17:00:55.983675 4840 scope.go:117] "RemoveContainer" containerID="a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.007519 4840 scope.go:117] "RemoveContainer" containerID="fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.007854 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\": container with ID starting with fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e not found: ID does not exist" containerID="fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.007895 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e"} err="failed to get container status \"fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\": rpc error: code = NotFound desc = could not find container \"fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e\": container with ID starting with fa7654b653a91d08e648a7d0705533067c61e2f3dbfcd020efbe3b000eb9375e not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.007921 4840 scope.go:117] "RemoveContainer" containerID="36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.008252 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\": container with ID starting with 36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093 not found: ID does not exist" 
containerID="36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.008295 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093"} err="failed to get container status \"36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\": rpc error: code = NotFound desc = could not find container \"36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093\": container with ID starting with 36b525e2e4c2732bc5adac795213f279815b4cab50ec51d360fc139c58f3b093 not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.008332 4840 scope.go:117] "RemoveContainer" containerID="ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.008722 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\": container with ID starting with ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a not found: ID does not exist" containerID="ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.008754 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a"} err="failed to get container status \"ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\": rpc error: code = NotFound desc = could not find container \"ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a\": container with ID starting with ef9c8650120833efdce75ffec16fd0f6bf944c53afce1ac828a8cbe5590f7e1a not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.008776 4840 scope.go:117] "RemoveContainer" containerID="4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.009072 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\": container with ID starting with 4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d not found: ID does not exist" containerID="4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.009099 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d"} err="failed to get container status \"4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\": rpc error: code = NotFound desc = could not find container \"4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d\": container with ID starting with 4691592d78fb0034db583d9323c40d2d695e7ab55f26ee640fc72877613f600d not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.009119 4840 scope.go:117] "RemoveContainer" containerID="9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.009378 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\": container with ID starting with 9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265 not found: ID does not exist" containerID="9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.009401 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265"} err="failed to get container status \"9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\": rpc error: code = NotFound desc = could not find container \"9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265\": container with ID starting with 9f728b241aed3aca750988f38d2a2dac0c3bc27c31db6cd0bce077c176ecc265 not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.009419 4840 scope.go:117] "RemoveContainer" containerID="a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.009688 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\": container with ID starting with a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75 not found: ID does not exist" containerID="a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.009708 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75"} err="failed to get container status \"a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\": rpc error: code = NotFound desc = could not find container \"a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75\": container with ID starting with a43a89021158bd7e591b806bb285374ce7a712b28bbf05b94f64ee46cb7dcc75 not found: ID does not exist" Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.619481 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.879545 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.880253 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.880995 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.881455 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" Dec 09 17:00:56 crc 
Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.619481 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.879545 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.880253 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.880995 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.881455 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.882111 4840 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:00:56 crc kubenswrapper[4840]: I1209 17:00:56.882179 4840 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Dec 09 17:00:56 crc kubenswrapper[4840]: E1209 17:00:56.882641 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="200ms"
Dec 09 17:00:57 crc kubenswrapper[4840]: E1209 17:00:57.083773 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="400ms"
Dec 09 17:00:57 crc kubenswrapper[4840]: E1209 17:00:57.485222 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="800ms"
Dec 09 17:00:58 crc kubenswrapper[4840]: E1209 17:00:58.286593 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="1.6s"
Dec 09 17:00:59 crc kubenswrapper[4840]: E1209 17:00:59.888493 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="3.2s"
Dec 09 17:01:03 crc kubenswrapper[4840]: E1209 17:01:03.089870 4840 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.204:6443: connect: connection refused" interval="6.4s"
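The ensure-lease retries above back off by doubling: 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s. A minimal Go sketch of that doubling schedule, with the start and cap copied from the logged intervals (the real kubelet computes its retry interval internally):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Start and cap match the intervals recorded in the log.
	interval := 200 * time.Millisecond
	maxInterval := 6400 * time.Millisecond
	for interval <= maxInterval {
		fmt.Println("next lease retry in", interval)
		interval *= 2 // double on each failure
	}
}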
Dec 09 17:01:03 crc kubenswrapper[4840]: E1209 17:01:03.454697 4840 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.204:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187f9aae72ef0c9d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,LastTimestamp:2025-12-09 17:00:52.995779741 +0000 UTC m=+238.986890384,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.616230 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.617193 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.643697 4840 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.643738 4840 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:03 crc kubenswrapper[4840]: E1209 17:01:03.644128 4840 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.644558 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:03 crc kubenswrapper[4840]: W1209 17:01:03.676735 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-7753e0e635e76728a5bfb8793d6fe7211baaab53ba913ce7f786abae9c58fbd8 WatchSource:0}: Error finding container 7753e0e635e76728a5bfb8793d6fe7211baaab53ba913ce7f786abae9c58fbd8: Status 404 returned error can't find the container with id 7753e0e635e76728a5bfb8793d6fe7211baaab53ba913ce7f786abae9c58fbd8
Dec 09 17:01:03 crc kubenswrapper[4840]: I1209 17:01:03.938278 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7753e0e635e76728a5bfb8793d6fe7211baaab53ba913ce7f786abae9c58fbd8"}
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.619582 4840 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.620482 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused"
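The "Deleting a mirror pod" entries above concern the API-server copy the kubelet maintains for a static pod; with the API server down, that DELETE fails like every other call. Mirror pods are recognizable by the kubernetes.io/config.mirror annotation; a tiny sketch of that check:

package main

import "fmt"

// isMirrorPod checks the annotation the kubelet sets on the API-server
// representation of a static pod; "kubernetes.io/config.mirror" is the
// standard key.
func isMirrorPod(annotations map[string]string) bool {
	_, ok := annotations["kubernetes.io/config.mirror"]
	return ok
}

func main() {
	ann := map[string]string{"kubernetes.io/config.mirror": "mirror"}
	fmt.Println("mirror pod:", isMirrorPod(ann))
}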
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.947073 4840 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="99417fe35b74f585d954893a8a55250c392c228246d3ded2f59748f7e780f5fd" exitCode=0
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.947165 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"99417fe35b74f585d954893a8a55250c392c228246d3ded2f59748f7e780f5fd"}
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.947568 4840 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.947608 4840 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.947822 4840 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:01:04 crc kubenswrapper[4840]: E1209 17:01:04.948039 4840 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.204:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:04 crc kubenswrapper[4840]: I1209 17:01:04.948362 4840 status_manager.go:851] "Failed to get status for pod" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.204:6443: connect: connection refused"
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.955207 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"35a666c871d76a53b027124a5c08bc0766bc84f27a0acab4ff6892fc638cec63"}
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.955552 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cdf1a8eed205423fc0161e7c06423a3486cdf321e55604afaa7b4f6e8b6be3b6"}
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.955570 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"14fc45e4287abe317512a8db1e6556cd1cf4705eb9b797db7171ac8136c3de7d"}
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.955581 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"aa3bde15c1ad5058bb2983284beb96304e17f808f388ee0840d4902af79176e2"}
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.957673 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.957712 4840 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012" exitCode=1
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.957731 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012"}
Dec 09 17:01:05 crc kubenswrapper[4840]: I1209 17:01:05.958127 4840 scope.go:117] "RemoveContainer" containerID="e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012"
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.965523 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.965631 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c7e82e964f5c17dca11555d96cd263e34c6c558e10bdc80468175b51c63b75f7"}
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.967713 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3d4dee6b1f9601ecced00762498214e34db7d4307098b7f60376b4fc4cf154af"}
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.967877 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.967938 4840 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:06 crc kubenswrapper[4840]: I1209 17:01:06.967982 4840 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014"
Dec 09 17:01:08 crc kubenswrapper[4840]: I1209 17:01:08.645805 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:08 crc kubenswrapper[4840]: I1209 17:01:08.646246 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:08 crc kubenswrapper[4840]: I1209 17:01:08.654426 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 17:01:08 crc kubenswrapper[4840]: I1209 17:01:08.755664 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.480552 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.480895 4840 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.480949 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.975446 4840 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.994919 4840 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014" Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.994953 4840 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014" Dec 09 17:01:11 crc kubenswrapper[4840]: I1209 17:01:11.999821 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:01:12 crc kubenswrapper[4840]: I1209 17:01:12.002020 4840 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="3bbb38da-d1da-440b-b818-dd6c9e1fba80" Dec 09 17:01:13 crc kubenswrapper[4840]: I1209 17:01:13.007672 4840 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014" Dec 09 17:01:13 crc kubenswrapper[4840]: I1209 17:01:13.007744 4840 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="700088c2-3a1d-468f-adf9-91f489a11014" Dec 09 17:01:14 crc kubenswrapper[4840]: I1209 17:01:14.634618 4840 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="3bbb38da-d1da-440b-b818-dd6c9e1fba80" Dec 09 17:01:21 crc kubenswrapper[4840]: I1209 17:01:21.019047 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 09 17:01:21 crc kubenswrapper[4840]: I1209 17:01:21.480747 4840 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 09 17:01:21 crc kubenswrapper[4840]: I1209 17:01:21.481114 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 09 17:01:21 crc kubenswrapper[4840]: I1209 17:01:21.902137 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 09 17:01:22 crc kubenswrapper[4840]: I1209 17:01:22.801884 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 09 17:01:22 crc kubenswrapper[4840]: I1209 17:01:22.822910 4840 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 09 17:01:23 crc kubenswrapper[4840]: I1209 17:01:23.493918 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 09 17:01:23 crc kubenswrapper[4840]: I1209 17:01:23.958252 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.036292 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.096238 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.206396 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.568149 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.821505 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.826946 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 09 17:01:24 crc kubenswrapper[4840]: I1209 17:01:24.877512 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.173083 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.198480 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.348439 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.730114 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.797098 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 09 17:01:25 crc kubenswrapper[4840]: I1209 17:01:25.931132 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.148702 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.180217 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.326324 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.330715 4840 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.359032 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.421501 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.481504 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.483396 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.484922 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.524097 4840 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.585277 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.810571 4840 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.850787 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 09 17:01:26 crc kubenswrapper[4840]: I1209 17:01:26.970614 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.044122 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.108589 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.143764 4840 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.202601 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.327523 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.351922 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.452196 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.529675 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 
17:01:27.533298 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.537715 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.564320 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.617387 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.624714 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.628923 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.667833 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.893851 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 09 17:01:27 crc kubenswrapper[4840]: I1209 17:01:27.947403 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.035048 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.044735 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.135673 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.149167 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.151324 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.155409 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.158772 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.191846 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.245490 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.325339 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 09 17:01:28 
crc kubenswrapper[4840]: I1209 17:01:28.335566 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.503358 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.509347 4840 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.514436 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.514490 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.514725 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.520152 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.534649 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.534352 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.534331512 podStartE2EDuration="17.534331512s" podCreationTimestamp="2025-12-09 17:01:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:01:28.533543729 +0000 UTC m=+274.524654372" watchObservedRunningTime="2025-12-09 17:01:28.534331512 +0000 UTC m=+274.525442145" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.534811 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.709911 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.710062 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.740520 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.788702 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 09 17:01:28 crc kubenswrapper[4840]: I1209 17:01:28.853745 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.053145 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.080770 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 
17:01:29.164800 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.311010 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.311071 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.397167 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.471368 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.484038 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.494995 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.549076 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.559270 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.572904 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.578582 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.748136 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.752904 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.796602 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.874472 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.926362 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 09 17:01:29 crc kubenswrapper[4840]: I1209 17:01:29.944829 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.016632 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.078525 4840 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.140861 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.146836 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.156850 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.244913 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.298726 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.301437 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.303600 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.307207 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.398146 4840 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.519136 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.550537 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.573840 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.589562 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.591528 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.637696 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.737937 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.895133 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 17:01:30.965089 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 09 17:01:30 crc kubenswrapper[4840]: I1209 
17:01:30.971178 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.051600 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.137857 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.259284 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.262433 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.282716 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.314532 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.324918 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.326937 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.351899 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.369580 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.405513 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.433482 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.443323 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.480740 4840 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.480799 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.480852 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 17:01:31 crc 
kubenswrapper[4840]: I1209 17:01:31.481423 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"c7e82e964f5c17dca11555d96cd263e34c6c558e10bdc80468175b51c63b75f7"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.481533 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://c7e82e964f5c17dca11555d96cd263e34c6c558e10bdc80468175b51c63b75f7" gracePeriod=30 Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.592562 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.640697 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.659845 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.704737 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.740859 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.776218 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.780244 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.825122 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.883827 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.891600 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.913610 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 09 17:01:31 crc kubenswrapper[4840]: I1209 17:01:31.963623 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.050887 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.062092 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.112154 4840 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.264907 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.283532 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.289711 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.296603 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.323505 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.371165 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.375301 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.445286 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.484209 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.492546 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.630776 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.698676 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.718233 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.724116 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.836805 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.848236 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.875842 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.946500 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 09 17:01:32 crc kubenswrapper[4840]: 
I1209 17:01:32.959084 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 09 17:01:32 crc kubenswrapper[4840]: I1209 17:01:32.966738 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.001928 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.047581 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.049954 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.070809 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.092061 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.142552 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.161565 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.169451 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.183234 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.238606 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.277792 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.376165 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.463487 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.472097 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.550303 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.587010 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.601460 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 
17:01:33.614423 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.687527 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.723020 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.796582 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 09 17:01:33 crc kubenswrapper[4840]: I1209 17:01:33.801108 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.137425 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.177815 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.256685 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.298147 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.338177 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.439049 4840 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.439283 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2" gracePeriod=5 Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.475763 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.504769 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.518653 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.603119 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.663951 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 17:01:34.850946 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 09 17:01:34 crc kubenswrapper[4840]: I1209 
17:01:34.964256 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:34.999915 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.126792 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.202864 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.255909 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.274870 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.286910 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.420291 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.549456 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.559407 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.773915 4840 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.800112 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.940910 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 09 17:01:35 crc kubenswrapper[4840]: I1209 17:01:35.997115 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.077595 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.080371 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.159244 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.174840 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.292398 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.295957 4840 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.416454 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.451711 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.571043 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.602886 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.692900 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.722196 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.792401 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.826816 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.957309 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 09 17:01:36 crc kubenswrapper[4840]: I1209 17:01:36.972680 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.156058 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.158502 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.212566 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.232375 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.463533 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.506713 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.508535 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.515432 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.531110 4840 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager"/"serving-cert" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.555374 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.730379 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 09 17:01:37 crc kubenswrapper[4840]: I1209 17:01:37.923168 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.164112 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.339616 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.416242 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.523092 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.618099 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.712936 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.747606 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.801808 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.821901 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.896739 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 09 17:01:38 crc kubenswrapper[4840]: I1209 17:01:38.909499 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 09 17:01:39 crc kubenswrapper[4840]: I1209 17:01:39.002820 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 09 17:01:39 crc kubenswrapper[4840]: I1209 17:01:39.536255 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 09 17:01:39 crc kubenswrapper[4840]: I1209 17:01:39.742699 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 09 17:01:39 crc kubenswrapper[4840]: I1209 17:01:39.946729 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.039584 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.039661 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.040680 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.040713 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.040741 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.040784 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.040838 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.041054 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.041105 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.041222 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.041119 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.051090 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.085579 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.141784 4840 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.141829 4840 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.141842 4840 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.141853 4840 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.141864 4840 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.178219 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.178287 4840 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2" exitCode=137 Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.178334 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.178345 4840 scope.go:117] "RemoveContainer" containerID="0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.202558 4840 scope.go:117] "RemoveContainer" containerID="0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2" Dec 09 17:01:40 crc kubenswrapper[4840]: E1209 17:01:40.203103 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2\": container with ID starting with 0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2 not found: ID does not exist" containerID="0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.203154 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2"} err="failed to get container status \"0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2\": rpc error: code = NotFound desc = could not find container \"0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2\": container with ID starting with 0bacd2e3f8b97b3fe7fe69db9ffbc97b87e719477b75b0e1055338b2091875d2 not found: ID does not exist" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.411836 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.616727 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 09 17:01:40 crc kubenswrapper[4840]: I1209 17:01:40.683841 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.228187 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-br44r"] Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.228809 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-br44r" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="registry-server" containerID="cri-o://91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02" gracePeriod=30 Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.234699 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rgpwz"] Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.235063 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rgpwz" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="registry-server" containerID="cri-o://4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e" gracePeriod=30 Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.246216 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"] Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.246545 4840 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" containerID="cri-o://d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79" gracePeriod=30 Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.252415 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"] Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.252787 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xpnvf" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="registry-server" containerID="cri-o://7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6" gracePeriod=30 Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.258305 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.258599 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w2jxv" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="registry-server" containerID="cri-o://c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab" gracePeriod=30 Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.668435 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rgpwz" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.675472 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-br44r" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.694389 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.699700 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.710000 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.790840 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities\") pod \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.790990 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content\") pod \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791089 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities\") pod \"d12f03b6-5a9d-479e-9e73-2f2476161d97\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791129 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d8ps\" (UniqueName: \"kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps\") pod \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\" (UID: \"470d5b30-6a3d-4d02-9ef4-ce35ea66af80\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791279 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ngxr\" (UniqueName: \"kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr\") pod \"7a467268-207b-41e8-927d-8bb4ce05c367\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791327 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content\") pod \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791369 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wv7q5\" (UniqueName: \"kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5\") pod \"d12f03b6-5a9d-479e-9e73-2f2476161d97\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791404 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content\") pod \"d12f03b6-5a9d-479e-9e73-2f2476161d97\" (UID: \"d12f03b6-5a9d-479e-9e73-2f2476161d97\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.791438 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca\") pod \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.792035 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities" (OuterVolumeSpecName: 
"utilities") pod "470d5b30-6a3d-4d02-9ef4-ce35ea66af80" (UID: "470d5b30-6a3d-4d02-9ef4-ce35ea66af80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.792215 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "07b0580c-0d27-48ee-8f33-3c5d7638ac47" (UID: "07b0580c-0d27-48ee-8f33-3c5d7638ac47"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.793097 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities" (OuterVolumeSpecName: "utilities") pod "d12f03b6-5a9d-479e-9e73-2f2476161d97" (UID: "d12f03b6-5a9d-479e-9e73-2f2476161d97"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.797620 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps" (OuterVolumeSpecName: "kube-api-access-7d8ps") pod "470d5b30-6a3d-4d02-9ef4-ce35ea66af80" (UID: "470d5b30-6a3d-4d02-9ef4-ce35ea66af80"). InnerVolumeSpecName "kube-api-access-7d8ps". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.797739 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5" (OuterVolumeSpecName: "kube-api-access-wv7q5") pod "d12f03b6-5a9d-479e-9e73-2f2476161d97" (UID: "d12f03b6-5a9d-479e-9e73-2f2476161d97"). InnerVolumeSpecName "kube-api-access-wv7q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.800264 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr" (OuterVolumeSpecName: "kube-api-access-5ngxr") pod "7a467268-207b-41e8-927d-8bb4ce05c367" (UID: "7a467268-207b-41e8-927d-8bb4ce05c367"). InnerVolumeSpecName "kube-api-access-5ngxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.847689 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d12f03b6-5a9d-479e-9e73-2f2476161d97" (UID: "d12f03b6-5a9d-479e-9e73-2f2476161d97"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.856913 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "470d5b30-6a3d-4d02-9ef4-ce35ea66af80" (UID: "470d5b30-6a3d-4d02-9ef4-ce35ea66af80"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892426 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5kvh\" (UniqueName: \"kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh\") pod \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892510 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tn8b5\" (UniqueName: \"kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5\") pod \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892561 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content\") pod \"7a467268-207b-41e8-927d-8bb4ce05c367\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892609 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities\") pod \"7a467268-207b-41e8-927d-8bb4ce05c367\" (UID: \"7a467268-207b-41e8-927d-8bb4ce05c367\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892701 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics\") pod \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\" (UID: \"07b0580c-0d27-48ee-8f33-3c5d7638ac47\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892732 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities\") pod \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\" (UID: \"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd\") " Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.892990 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893014 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893033 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d8ps\" (UniqueName: \"kubernetes.io/projected/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-kube-api-access-7d8ps\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893052 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ngxr\" (UniqueName: \"kubernetes.io/projected/7a467268-207b-41e8-927d-8bb4ce05c367-kube-api-access-5ngxr\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893069 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wv7q5\" (UniqueName: 
\"kubernetes.io/projected/d12f03b6-5a9d-479e-9e73-2f2476161d97-kube-api-access-wv7q5\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893087 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d12f03b6-5a9d-479e-9e73-2f2476161d97-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893103 4840 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.893122 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d5b30-6a3d-4d02-9ef4-ce35ea66af80-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.894403 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities" (OuterVolumeSpecName: "utilities") pod "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" (UID: "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.911672 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities" (OuterVolumeSpecName: "utilities") pod "7a467268-207b-41e8-927d-8bb4ce05c367" (UID: "7a467268-207b-41e8-927d-8bb4ce05c367"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.931843 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a467268-207b-41e8-927d-8bb4ce05c367" (UID: "7a467268-207b-41e8-927d-8bb4ce05c367"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.932284 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" (UID: "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.977220 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5" (OuterVolumeSpecName: "kube-api-access-tn8b5") pod "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" (UID: "4e4ee4ac-929f-41ef-b1ed-ea6e070793bd"). InnerVolumeSpecName "kube-api-access-tn8b5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.978021 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh" (OuterVolumeSpecName: "kube-api-access-r5kvh") pod "07b0580c-0d27-48ee-8f33-3c5d7638ac47" (UID: "07b0580c-0d27-48ee-8f33-3c5d7638ac47"). InnerVolumeSpecName "kube-api-access-r5kvh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.978428 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "07b0580c-0d27-48ee-8f33-3c5d7638ac47" (UID: "07b0580c-0d27-48ee-8f33-3c5d7638ac47"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994415 4840 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/07b0580c-0d27-48ee-8f33-3c5d7638ac47-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994446 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994456 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5kvh\" (UniqueName: \"kubernetes.io/projected/07b0580c-0d27-48ee-8f33-3c5d7638ac47-kube-api-access-r5kvh\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994464 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tn8b5\" (UniqueName: \"kubernetes.io/projected/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-kube-api-access-tn8b5\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994475 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994483 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a467268-207b-41e8-927d-8bb4ce05c367-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:43 crc kubenswrapper[4840]: I1209 17:01:43.994507 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.212677 4840 generic.go:334] "Generic (PLEG): container finished" podID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerID="91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02" exitCode=0 Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.212744 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-br44r" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.212810 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerDied","Data":"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.212868 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-br44r" event={"ID":"d12f03b6-5a9d-479e-9e73-2f2476161d97","Type":"ContainerDied","Data":"5f62ad2d5bae7ccd19d244b186730c7a088fd62a99a9f37a8bb49629306fee3a"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.212897 4840 scope.go:117] "RemoveContainer" containerID="91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.217318 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a467268-207b-41e8-927d-8bb4ce05c367" containerID="7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6" exitCode=0 Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.217414 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerDied","Data":"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.217457 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpnvf" event={"ID":"7a467268-207b-41e8-927d-8bb4ce05c367","Type":"ContainerDied","Data":"de2310ad95f7f0966e2f47d1b7508945502cf082c80befb40fee228444b66610"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.217549 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpnvf" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.227400 4840 generic.go:334] "Generic (PLEG): container finished" podID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerID="d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79" exitCode=0 Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.227490 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.227475 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" event={"ID":"07b0580c-0d27-48ee-8f33-3c5d7638ac47","Type":"ContainerDied","Data":"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.227575 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2ckb5" event={"ID":"07b0580c-0d27-48ee-8f33-3c5d7638ac47","Type":"ContainerDied","Data":"13a6db2af823af09680b6df8c42cb8cd4e3a4c657fc01d752827670e706fa9c3"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.239539 4840 scope.go:117] "RemoveContainer" containerID="768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.245948 4840 generic.go:334] "Generic (PLEG): container finished" podID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerID="c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab" exitCode=0 Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.246093 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerDied","Data":"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.246159 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2jxv" event={"ID":"4e4ee4ac-929f-41ef-b1ed-ea6e070793bd","Type":"ContainerDied","Data":"f566945c8b34e03ca8ba769da90b9a8fe1677257767542e6df585b8adb5b4610"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.246169 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2jxv" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.258723 4840 generic.go:334] "Generic (PLEG): container finished" podID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerID="4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e" exitCode=0 Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.258846 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rgpwz" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.258945 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerDied","Data":"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.259596 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rgpwz" event={"ID":"470d5b30-6a3d-4d02-9ef4-ce35ea66af80","Type":"ContainerDied","Data":"f497737cfe08b09b02eab01cc5a16700889e8af695db1542d37dfa1541d40cf9"} Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.261414 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-br44r"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.283414 4840 scope.go:117] "RemoveContainer" containerID="c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.286662 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-br44r"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.296985 4840 scope.go:117] "RemoveContainer" containerID="91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.297586 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"] Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.297893 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02\": container with ID starting with 91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02 not found: ID does not exist" containerID="91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.297942 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02"} err="failed to get container status \"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02\": rpc error: code = NotFound desc = could not find container \"91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02\": container with ID starting with 91a8529adc47279024553f2b21e0b0e361c8d233db155023115a5f88cb49ea02 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.298011 4840 scope.go:117] "RemoveContainer" containerID="768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.298455 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d\": container with ID starting with 768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d not found: ID does not exist" containerID="768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.298488 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d"} err="failed to get 
container status \"768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d\": rpc error: code = NotFound desc = could not find container \"768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d\": container with ID starting with 768552dff795102188290b5b817241c75a2bcc9ef72fc055942a63bc0676157d not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.298513 4840 scope.go:117] "RemoveContainer" containerID="c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.298772 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63\": container with ID starting with c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63 not found: ID does not exist" containerID="c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.298792 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63"} err="failed to get container status \"c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63\": rpc error: code = NotFound desc = could not find container \"c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63\": container with ID starting with c3d77d1e2f636ea7b850e1fcb0291b0d27e3ead1dbebae2d74286eed4c725f63 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.298810 4840 scope.go:117] "RemoveContainer" containerID="7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.303063 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpnvf"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.306513 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.314430 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2ckb5"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.315134 4840 scope.go:117] "RemoveContainer" containerID="60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.319233 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.325044 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w2jxv"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.328755 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rgpwz"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.331448 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rgpwz"] Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.336273 4840 scope.go:117] "RemoveContainer" containerID="f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.348016 4840 scope.go:117] "RemoveContainer" containerID="7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6" Dec 09 17:01:44 crc 
kubenswrapper[4840]: E1209 17:01:44.348356 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6\": container with ID starting with 7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6 not found: ID does not exist" containerID="7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.348401 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6"} err="failed to get container status \"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6\": rpc error: code = NotFound desc = could not find container \"7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6\": container with ID starting with 7c328fdb487ff8a5202d4463d04260a14c6a1e07ce5cbd8cd521e6d3331d51f6 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.348430 4840 scope.go:117] "RemoveContainer" containerID="60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.348620 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b\": container with ID starting with 60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b not found: ID does not exist" containerID="60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.348649 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b"} err="failed to get container status \"60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b\": rpc error: code = NotFound desc = could not find container \"60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b\": container with ID starting with 60854aa5d3f3b55f98792a4c3cb47c8bc869b9cb86b3bd97509d8b992164723b not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.348670 4840 scope.go:117] "RemoveContainer" containerID="f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.351171 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906\": container with ID starting with f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906 not found: ID does not exist" containerID="f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.351210 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906"} err="failed to get container status \"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906\": rpc error: code = NotFound desc = could not find container \"f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906\": container with ID starting with f1f0f1e38bbc2d58e0f23e1dc2c5abb66caf5705fd4e89e39924d29fffcef906 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: 
I1209 17:01:44.351236 4840 scope.go:117] "RemoveContainer" containerID="d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.393924 4840 scope.go:117] "RemoveContainer" containerID="d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.395534 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79\": container with ID starting with d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79 not found: ID does not exist" containerID="d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.395568 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79"} err="failed to get container status \"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79\": rpc error: code = NotFound desc = could not find container \"d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79\": container with ID starting with d7c69039ff26429f878ee0cb3d06f7fa6a67d23600bb7d6789de84b237c17a79 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.395592 4840 scope.go:117] "RemoveContainer" containerID="c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.408112 4840 scope.go:117] "RemoveContainer" containerID="5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.425516 4840 scope.go:117] "RemoveContainer" containerID="faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.446588 4840 scope.go:117] "RemoveContainer" containerID="c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.447195 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab\": container with ID starting with c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab not found: ID does not exist" containerID="c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.447274 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab"} err="failed to get container status \"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab\": rpc error: code = NotFound desc = could not find container \"c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab\": container with ID starting with c3a6c823a5aa09c2c4f78d1d2432cb6cde0b167a4dce62947cc6aac6fe977aab not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.447315 4840 scope.go:117] "RemoveContainer" containerID="5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.447787 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1\": container with ID starting with 5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1 not found: ID does not exist" containerID="5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.447845 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1"} err="failed to get container status \"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1\": rpc error: code = NotFound desc = could not find container \"5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1\": container with ID starting with 5285bb2faee635af2b2262e1f9ea1e65372c1524d3b8937d3085810c074685f1 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.447886 4840 scope.go:117] "RemoveContainer" containerID="faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.448329 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d\": container with ID starting with faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d not found: ID does not exist" containerID="faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.448395 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d"} err="failed to get container status \"faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d\": rpc error: code = NotFound desc = could not find container \"faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d\": container with ID starting with faeeee77c57a2550f5615bbf60dc71dbc12acb2879091571e0f3e82cacb8062d not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.448428 4840 scope.go:117] "RemoveContainer" containerID="4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.462739 4840 scope.go:117] "RemoveContainer" containerID="3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.476733 4840 scope.go:117] "RemoveContainer" containerID="b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.492956 4840 scope.go:117] "RemoveContainer" containerID="4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.493393 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e\": container with ID starting with 4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e not found: ID does not exist" containerID="4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.493434 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e"} err="failed to get container status 
\"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e\": rpc error: code = NotFound desc = could not find container \"4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e\": container with ID starting with 4b921d0eb023c3898c8069240226d668ef9cf7d2324fbdf5f790291ed4bf774e not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.493463 4840 scope.go:117] "RemoveContainer" containerID="3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.493790 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c\": container with ID starting with 3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c not found: ID does not exist" containerID="3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.493824 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c"} err="failed to get container status \"3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c\": rpc error: code = NotFound desc = could not find container \"3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c\": container with ID starting with 3c328deaca6374991b8763844f5640d12b1b7ad0f949d65fb2f334f8e999d33c not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.493848 4840 scope.go:117] "RemoveContainer" containerID="b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753" Dec 09 17:01:44 crc kubenswrapper[4840]: E1209 17:01:44.494214 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753\": container with ID starting with b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753 not found: ID does not exist" containerID="b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.494240 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753"} err="failed to get container status \"b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753\": rpc error: code = NotFound desc = could not find container \"b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753\": container with ID starting with b0be5c249b1d39ca493dd022e75fea525c8dd388fd46e101699352f5d9603753 not found: ID does not exist" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.620407 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" path="/var/lib/kubelet/pods/07b0580c-0d27-48ee-8f33-3c5d7638ac47/volumes" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.622043 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" path="/var/lib/kubelet/pods/470d5b30-6a3d-4d02-9ef4-ce35ea66af80/volumes" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.623547 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" path="/var/lib/kubelet/pods/4e4ee4ac-929f-41ef-b1ed-ea6e070793bd/volumes" Dec 
09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.626032 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" path="/var/lib/kubelet/pods/7a467268-207b-41e8-927d-8bb4ce05c367/volumes" Dec 09 17:01:44 crc kubenswrapper[4840]: I1209 17:01:44.627744 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" path="/var/lib/kubelet/pods/d12f03b6-5a9d-479e-9e73-2f2476161d97/volumes" Dec 09 17:01:54 crc kubenswrapper[4840]: I1209 17:01:54.429875 4840 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.376200 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.379081 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.379134 4840 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c7e82e964f5c17dca11555d96cd263e34c6c558e10bdc80468175b51c63b75f7" exitCode=137 Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.379185 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c7e82e964f5c17dca11555d96cd263e34c6c558e10bdc80468175b51c63b75f7"} Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.379262 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c138ede05e8519d97e7650a84ca0acfd97aca04b5e1cecfd1018c75403372508"} Dec 09 17:02:02 crc kubenswrapper[4840]: I1209 17:02:02.379301 4840 scope.go:117] "RemoveContainer" containerID="e4fc7e963839ddaf4299d58eb51f1836f38757b0a086b22bbcb7c7fee2597012" Dec 09 17:02:03 crc kubenswrapper[4840]: I1209 17:02:03.386307 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 09 17:02:08 crc kubenswrapper[4840]: I1209 17:02:08.755763 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 17:02:11 crc kubenswrapper[4840]: I1209 17:02:11.480923 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 17:02:11 crc kubenswrapper[4840]: I1209 17:02:11.485145 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 17:02:12 crc kubenswrapper[4840]: I1209 17:02:12.452789 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.058999 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"] Dec 09 
17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.061910 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" podUID="40f11448-6267-4747-9954-da5b290bcef6" containerName="controller-manager" containerID="cri-o://78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f" gracePeriod=30 Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.068534 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"] Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.068758 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" podUID="185aeb05-e73d-4ece-a947-8163702dd545" containerName="route-controller-manager" containerID="cri-o://4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86" gracePeriod=30 Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.127907 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jwf2s"] Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128406 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128423 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128433 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128441 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128454 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128463 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128475 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128483 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128492 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128500 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128512 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" containerName="installer" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128519 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" containerName="installer" Dec 09 
17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128529 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128537 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128545 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128553 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128564 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128571 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128580 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128588 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128599 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128607 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128620 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128628 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128637 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128645 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128653 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128660 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="extract-content" Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.128670 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128678 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" 
containerName="extract-utilities" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128802 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a467268-207b-41e8-927d-8bb4ce05c367" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128818 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="470d5b30-6a3d-4d02-9ef4-ce35ea66af80" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128830 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128839 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="07b0580c-0d27-48ee-8f33-3c5d7638ac47" containerName="marketplace-operator" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128853 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e4ee4ac-929f-41ef-b1ed-ea6e070793bd" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128864 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9096f725-037d-47fd-a7f8-61a896113bc5" containerName="installer" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.128875 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d12f03b6-5a9d-479e-9e73-2f2476161d97" containerName="registry-server" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.129382 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.131338 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.131664 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.131672 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.131679 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.142682 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jwf2s"] Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.143535 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.143610 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.143653 
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.144046 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.244474 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.244664 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdfk2\" (UniqueName: \"kubernetes.io/projected/0448e5c8-5dda-4bb9-a501-b76890d0bf29-kube-api-access-hdfk2\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.244730 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.246191 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.251590 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0448e5c8-5dda-4bb9-a501-b76890d0bf29-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.264275 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdfk2\" (UniqueName: \"kubernetes.io/projected/0448e5c8-5dda-4bb9-a501-b76890d0bf29-kube-api-access-hdfk2\") pod \"marketplace-operator-79b997595-jwf2s\" (UID: \"0448e5c8-5dda-4bb9-a501-b76890d0bf29\") " pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.438944 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.459370 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc"
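For each of the marketplace-operator volumes the reconciler logs the same three-step handshake: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A toy sketch of that sequencing (the types and phase names here are illustrative, not kubelet's actual reconciler):

package main

import "fmt"

// phase models the three states a volume passes through in the log above.
type phase int

const (
	verified phase = iota // operationExecutor.VerifyControllerAttachedVolume
	mounting              // operationExecutor.MountVolume started
	mounted               // MountVolume.SetUp succeeded
)

// advance moves a volume one step forward and reports the transition,
// mirroring the order the reconciler logs for each volume.
func advance(state map[string]phase, vol string) {
	switch state[vol] {
	case verified:
		state[vol] = mounting
		fmt.Printf("MountVolume started for %q\n", vol)
	case mounting:
		state[vol] = mounted
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", vol)
	}
}

func main() {
	state := map[string]phase{"marketplace-trusted-ca": verified}
	advance(state, "marketplace-trusted-ca")
	advance(state, "marketplace-trusted-ca")
}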
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.527409 4840 generic.go:334] "Generic (PLEG): container finished" podID="185aeb05-e73d-4ece-a947-8163702dd545" containerID="4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86" exitCode=0
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.527459 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.527491 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" event={"ID":"185aeb05-e73d-4ece-a947-8163702dd545","Type":"ContainerDied","Data":"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"}
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.527531 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq" event={"ID":"185aeb05-e73d-4ece-a947-8163702dd545","Type":"ContainerDied","Data":"3352fceb67167fc7f2e2b1868f024f5aac35505235456264ad53150dbd01aa03"}
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.527548 4840 scope.go:117] "RemoveContainer" containerID="4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.528719 4840 generic.go:334] "Generic (PLEG): container finished" podID="40f11448-6267-4747-9954-da5b290bcef6" containerID="78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f" exitCode=0
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.528776 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" event={"ID":"40f11448-6267-4747-9954-da5b290bcef6","Type":"ContainerDied","Data":"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"}
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.528793 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc" event={"ID":"40f11448-6267-4747-9954-da5b290bcef6","Type":"ContainerDied","Data":"e8b38d18bb2de70b0853a8e341e3c0ddc0877e27a2f37c7acd51d61deb0d4846"}
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.528799 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p6wc"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.532345 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
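The generic.go:334 / kubelet.go:2453 pairs above are the Pod Lifecycle Event Generator (PLEG) noticing that a container exited and relaying ContainerDied events, once for the container and once for its sandbox, into the sync loop. A hedged sketch of that event shape and dispatch (field names copied from the log output; the handler is hypothetical):

package main

import "fmt"

// plegEvent mirrors the event payload printed in the log:
// {"ID":"<pod UID>","Type":"ContainerDied","Data":"<container or sandbox ID>"}.
type plegEvent struct {
	ID   string // pod UID
	Type string // e.g. ContainerStarted, ContainerDied
	Data string // container or sandbox ID
}

// handle is an illustrative sync-loop dispatch, not kubelet's real one.
func handle(ev plegEvent) {
	switch ev.Type {
	case "ContainerDied":
		fmt.Printf("pod %s: container %s died, scheduling cleanup\n", ev.ID, ev.Data)
	case "ContainerStarted":
		fmt.Printf("pod %s: container %s started\n", ev.ID, ev.Data)
	}
}

func main() {
	handle(plegEvent{ID: "185aeb05-e73d-4ece-a947-8163702dd545", Type: "ContainerDied",
		Data: "4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"})
}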
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.547623 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert\") pod \"185aeb05-e73d-4ece-a947-8163702dd545\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.547737 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6wzk\" (UniqueName: \"kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk\") pod \"185aeb05-e73d-4ece-a947-8163702dd545\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.547784 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config\") pod \"185aeb05-e73d-4ece-a947-8163702dd545\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.547846 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca\") pod \"185aeb05-e73d-4ece-a947-8163702dd545\" (UID: \"185aeb05-e73d-4ece-a947-8163702dd545\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.548891 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca" (OuterVolumeSpecName: "client-ca") pod "185aeb05-e73d-4ece-a947-8163702dd545" (UID: "185aeb05-e73d-4ece-a947-8163702dd545"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.548956 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config" (OuterVolumeSpecName: "config") pod "185aeb05-e73d-4ece-a947-8163702dd545" (UID: "185aeb05-e73d-4ece-a947-8163702dd545"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.551517 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "185aeb05-e73d-4ece-a947-8163702dd545" (UID: "185aeb05-e73d-4ece-a947-8163702dd545"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.551716 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk" (OuterVolumeSpecName: "kube-api-access-v6wzk") pod "185aeb05-e73d-4ece-a947-8163702dd545" (UID: "185aeb05-e73d-4ece-a947-8163702dd545"). InnerVolumeSpecName "kube-api-access-v6wzk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.557943 4840 scope.go:117] "RemoveContainer" containerID="4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"
Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.558413 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86\": container with ID starting with 4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86 not found: ID does not exist" containerID="4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.558449 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"} err="failed to get container status \"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86\": rpc error: code = NotFound desc = could not find container \"4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86\": container with ID starting with 4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86 not found: ID does not exist"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.558474 4840 scope.go:117] "RemoveContainer" containerID="78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.574334 4840 scope.go:117] "RemoveContainer" containerID="78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"
Dec 09 17:02:26 crc kubenswrapper[4840]: E1209 17:02:26.574844 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f\": container with ID starting with 78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f not found: ID does not exist" containerID="78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.575097 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f"} err="failed to get container status \"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f\": rpc error: code = NotFound desc = could not find container \"78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f\": container with ID starting with 78ccba6c3792de92a9f7c0d1bf045a4bb60a5f1df6456844c8f4fb0abb23b36f not found: ID does not exist"
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.649240 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5k8p\" (UniqueName: \"kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p\") pod \"40f11448-6267-4747-9954-da5b290bcef6\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.649592 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert\") pod \"40f11448-6267-4747-9954-da5b290bcef6\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.649649 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca\") pod \"40f11448-6267-4747-9954-da5b290bcef6\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") "
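The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are benign: the container is already gone, the runtime answers NotFound, and the kubelet logs the error and moves on. The usual way to code that tolerance, sketched with a stand-in error value (errNotFound and statusOf are mine, not CRI-O's API):

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the gRPC NotFound the runtime returns above.
var errNotFound = errors.New("container not found")

// statusOf simulates a runtime whose container has already been removed.
func statusOf(id string) error { return fmt.Errorf("ContainerStatus %q: %w", id, errNotFound) }

// remove treats "already gone" as success, which is why the log keeps going
// after each DeleteContainer error.
func remove(id string) error {
	if err := statusOf(id); err != nil {
		if errors.Is(err, errNotFound) {
			return nil // idempotent: nothing left to delete
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(remove("4b8d4ed30068c940727395c71cd37eb4e787ea451603b9ab2136a60a2f0f5c86"))
}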
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.649673 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config\") pod \"40f11448-6267-4747-9954-da5b290bcef6\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.650122 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles\") pod \"40f11448-6267-4747-9954-da5b290bcef6\" (UID: \"40f11448-6267-4747-9954-da5b290bcef6\") "
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.650384 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6wzk\" (UniqueName: \"kubernetes.io/projected/185aeb05-e73d-4ece-a947-8163702dd545-kube-api-access-v6wzk\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.650403 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.650416 4840 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/185aeb05-e73d-4ece-a947-8163702dd545-client-ca\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.650428 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185aeb05-e73d-4ece-a947-8163702dd545-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.651251 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "40f11448-6267-4747-9954-da5b290bcef6" (UID: "40f11448-6267-4747-9954-da5b290bcef6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.651395 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config" (OuterVolumeSpecName: "config") pod "40f11448-6267-4747-9954-da5b290bcef6" (UID: "40f11448-6267-4747-9954-da5b290bcef6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.651478 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca" (OuterVolumeSpecName: "client-ca") pod "40f11448-6267-4747-9954-da5b290bcef6" (UID: "40f11448-6267-4747-9954-da5b290bcef6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.655042 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p" (OuterVolumeSpecName: "kube-api-access-p5k8p") pod "40f11448-6267-4747-9954-da5b290bcef6" (UID: "40f11448-6267-4747-9954-da5b290bcef6"). InnerVolumeSpecName "kube-api-access-p5k8p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.657280 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "40f11448-6267-4747-9954-da5b290bcef6" (UID: "40f11448-6267-4747-9954-da5b290bcef6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.741370 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jwf2s"]
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.751089 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40f11448-6267-4747-9954-da5b290bcef6-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.751123 4840 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-client-ca\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.751132 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.751141 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/40f11448-6267-4747-9954-da5b290bcef6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.751152 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5k8p\" (UniqueName: \"kubernetes.io/projected/40f11448-6267-4747-9954-da5b290bcef6-kube-api-access-p5k8p\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.842635 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"]
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.845270 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-66mgq"]
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.862089 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"]
Dec 09 17:02:26 crc kubenswrapper[4840]: I1209 17:02:26.868254 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p6wc"]
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.317273 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:27 crc kubenswrapper[4840]: E1209 17:02:27.317757 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f11448-6267-4747-9954-da5b290bcef6" containerName="controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.317774 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f11448-6267-4747-9954-da5b290bcef6" containerName="controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: E1209 17:02:27.317784 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="185aeb05-e73d-4ece-a947-8163702dd545" containerName="route-controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.317791 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="185aeb05-e73d-4ece-a947-8163702dd545" containerName="route-controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.317881 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="40f11448-6267-4747-9954-da5b290bcef6" containerName="controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.317896 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="185aeb05-e73d-4ece-a947-8163702dd545" containerName="route-controller-manager"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.318297 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.320909 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"]
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.321189 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.321373 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.321517 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.321656 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.322146 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.323765 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.324322 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.324470 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.325977 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.328249 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.328499 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.328937 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.329139 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"]
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.329545 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.329701 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.332424 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460061 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-config\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460116 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/257c5229-a546-421d-8cfd-fbb2a96c0cde-serving-cert\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460190 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
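Before it mounts anything for the replacement pods, the kubelet waits for its reflector caches of the referenced ConfigMaps and Secrets to populate; that is why the burst of "Caches populated" lines precedes the volume work. A minimal gate in the same spirit (channel-based and purely illustrative, not client-go's WaitForCacheSync):

package main

import (
	"fmt"
	"sync"
)

// waitForCaches blocks until every named cache has reported populated,
// the way the reflector lines above precede the mount operations.
func waitForCaches(populated ...<-chan struct{}) {
	var wg sync.WaitGroup
	for _, ch := range populated {
		wg.Add(1)
		go func(c <-chan struct{}) { defer wg.Done(); <-c }(ch)
	}
	wg.Wait()
}

func main() {
	cfg, secret := make(chan struct{}), make(chan struct{})
	go func() { close(cfg); close(secret) }() // caches populate asynchronously
	waitForCaches(cfg, secret)
	fmt.Println("caches populated; safe to mount volumes")
}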
\"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460225 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdrh7\" (UniqueName: \"kubernetes.io/projected/257c5229-a546-421d-8cfd-fbb2a96c0cde-kube-api-access-xdrh7\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460259 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-client-ca\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460312 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460341 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460378 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cftn9\" (UniqueName: \"kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.460397 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.533828 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" event={"ID":"0448e5c8-5dda-4bb9-a501-b76890d0bf29","Type":"ContainerStarted","Data":"f40e2799a2476a00a73acd9c2818aacafddef272fd1daf50c18083e6fc4a1e2c"} Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.533877 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" event={"ID":"0448e5c8-5dda-4bb9-a501-b76890d0bf29","Type":"ContainerStarted","Data":"823e0275b8effe7e79fed1013f0e0bf78f4eaa51d5bf812cfc7abba418d9cb71"} Dec 09 17:02:27 crc 
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.536922 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.551065 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-jwf2s" podStartSLOduration=1.5510449450000001 podStartE2EDuration="1.551044945s" podCreationTimestamp="2025-12-09 17:02:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:02:27.548104839 +0000 UTC m=+333.539215472" watchObservedRunningTime="2025-12-09 17:02:27.551044945 +0000 UTC m=+333.542155578"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561161 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-config\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561205 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/257c5229-a546-421d-8cfd-fbb2a96c0cde-serving-cert\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561223 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561241 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdrh7\" (UniqueName: \"kubernetes.io/projected/257c5229-a546-421d-8cfd-fbb2a96c0cde-kube-api-access-xdrh7\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561280 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-client-ca\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561304 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561326 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
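The pod_startup_latency_tracker entry above is simple arithmetic over two of its own fields: podStartSLOduration is observedRunningTime minus podCreationTimestamp (1.551044945s here, since no image pull happened and the pulling timestamps are zero). Reproducing the subtraction in Go with the timestamps as logged:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching the "2025-12-09 17:02:26 +0000 UTC" form in the log.
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, _ := time.Parse(layout, "2025-12-09 17:02:26 +0000 UTC")
	observed, _ := time.Parse(layout, "2025-12-09 17:02:27.551044945 +0000 UTC")
	// Prints 1.551044945s, matching podStartSLOduration in the entry above.
	fmt.Println(observed.Sub(created))
}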
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561380 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cftn9\" (UniqueName: \"kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.561402 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.562848 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-config\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.563106 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/257c5229-a546-421d-8cfd-fbb2a96c0cde-client-ca\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.563303 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.563611 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.563914 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.567118 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/257c5229-a546-421d-8cfd-fbb2a96c0cde-serving-cert\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
\"kubernetes.io/secret/257c5229-a546-421d-8cfd-fbb2a96c0cde-serving-cert\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.568406 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.581884 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdrh7\" (UniqueName: \"kubernetes.io/projected/257c5229-a546-421d-8cfd-fbb2a96c0cde-kube-api-access-xdrh7\") pod \"route-controller-manager-5978455979-hft5q\" (UID: \"257c5229-a546-421d-8cfd-fbb2a96c0cde\") " pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.585261 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cftn9\" (UniqueName: \"kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9\") pod \"controller-manager-74577df4c5-tngqw\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.703654 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.714382 4840 util.go:30] "No sandbox for pod can be found. 
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.983542 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:27 crc kubenswrapper[4840]: W1209 17:02:27.988734 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod353ca902_a15f_4bae_b353_48ae927810e3.slice/crio-7f6a2823e1bf9eb282dccc58c1648c50a8cd8c6a9acbf00a1b47f20fc5a0daf6 WatchSource:0}: Error finding container 7f6a2823e1bf9eb282dccc58c1648c50a8cd8c6a9acbf00a1b47f20fc5a0daf6: Status 404 returned error can't find the container with id 7f6a2823e1bf9eb282dccc58c1648c50a8cd8c6a9acbf00a1b47f20fc5a0daf6
Dec 09 17:02:27 crc kubenswrapper[4840]: I1209 17:02:27.993188 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"]
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.543285 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" event={"ID":"353ca902-a15f-4bae-b353-48ae927810e3","Type":"ContainerStarted","Data":"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"}
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.543660 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.543675 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" event={"ID":"353ca902-a15f-4bae-b353-48ae927810e3","Type":"ContainerStarted","Data":"7f6a2823e1bf9eb282dccc58c1648c50a8cd8c6a9acbf00a1b47f20fc5a0daf6"}
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.545910 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" event={"ID":"257c5229-a546-421d-8cfd-fbb2a96c0cde","Type":"ContainerStarted","Data":"bebb2deebc6d56bda1f21274db3d11811392da00189275f108f90d9576c8a550"}
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.546071 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" event={"ID":"257c5229-a546-421d-8cfd-fbb2a96c0cde","Type":"ContainerStarted","Data":"407785b68200dc88ddce0a08399012d088f053f812cb7cc0c194a91e4d6f3b8a"}
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.546183 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.550484 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.551057 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.569181 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" podStartSLOduration=2.569157958 podStartE2EDuration="2.569157958s" podCreationTimestamp="2025-12-09 17:02:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:02:28.565001716 +0000 UTC m=+334.556112359" watchObservedRunningTime="2025-12-09 17:02:28.569157958 +0000 UTC m=+334.560268611"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.603284 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5978455979-hft5q" podStartSLOduration=2.603264944 podStartE2EDuration="2.603264944s" podCreationTimestamp="2025-12-09 17:02:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:02:28.602214173 +0000 UTC m=+334.593324826" watchObservedRunningTime="2025-12-09 17:02:28.603264944 +0000 UTC m=+334.594375577"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.615519 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="185aeb05-e73d-4ece-a947-8163702dd545" path="/var/lib/kubelet/pods/185aeb05-e73d-4ece-a947-8163702dd545/volumes"
Dec 09 17:02:28 crc kubenswrapper[4840]: I1209 17:02:28.616382 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40f11448-6267-4747-9954-da5b290bcef6" path="/var/lib/kubelet/pods/40f11448-6267-4747-9954-da5b290bcef6/volumes"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.035947 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.036292 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.038714 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.039086 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" podUID="353ca902-a15f-4bae-b353-48ae927810e3" containerName="controller-manager" containerID="cri-o://b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016" gracePeriod=30
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.487799 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
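The machine-config-daemon liveness failure above is simply an HTTP GET that cannot connect; the prober reports the dial error verbatim. The check reduces to something like the following sketch (endpoint taken from the log; the helper and its semantics are illustrative, not kubelet's prober):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness check in the spirit of the prober
// lines above: any transport error or non-2xx status counts as a failure.
func probeOnce(url string) error {
	client := http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err) // e.g. connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(probeOnce("http://127.0.0.1:8798/health"))
}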
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.579402 4840 generic.go:334] "Generic (PLEG): container finished" podID="353ca902-a15f-4bae-b353-48ae927810e3" containerID="b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016" exitCode=0
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.579445 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" event={"ID":"353ca902-a15f-4bae-b353-48ae927810e3","Type":"ContainerDied","Data":"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"}
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.579470 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw" event={"ID":"353ca902-a15f-4bae-b353-48ae927810e3","Type":"ContainerDied","Data":"7f6a2823e1bf9eb282dccc58c1648c50a8cd8c6a9acbf00a1b47f20fc5a0daf6"}
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.579488 4840 scope.go:117] "RemoveContainer" containerID="b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.579596 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74577df4c5-tngqw"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.593276 4840 scope.go:117] "RemoveContainer" containerID="b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"
Dec 09 17:02:34 crc kubenswrapper[4840]: E1209 17:02:34.593681 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016\": container with ID starting with b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016 not found: ID does not exist" containerID="b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.593735 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016"} err="failed to get container status \"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016\": rpc error: code = NotFound desc = could not find container \"b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016\": container with ID starting with b3c60311763437a8de04594d558ba98cfbeb46bff9a9b2246725a73caca21016 not found: ID does not exist"
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.657097 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") "
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.657409 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") "
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.657488 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") "
\"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.657571 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.657606 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cftn9\" (UniqueName: \"kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9\") pod \"353ca902-a15f-4bae-b353-48ae927810e3\" (UID: \"353ca902-a15f-4bae-b353-48ae927810e3\") " Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.658336 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "353ca902-a15f-4bae-b353-48ae927810e3" (UID: "353ca902-a15f-4bae-b353-48ae927810e3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.658328 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca" (OuterVolumeSpecName: "client-ca") pod "353ca902-a15f-4bae-b353-48ae927810e3" (UID: "353ca902-a15f-4bae-b353-48ae927810e3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.658449 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config" (OuterVolumeSpecName: "config") pod "353ca902-a15f-4bae-b353-48ae927810e3" (UID: "353ca902-a15f-4bae-b353-48ae927810e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.662672 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9" (OuterVolumeSpecName: "kube-api-access-cftn9") pod "353ca902-a15f-4bae-b353-48ae927810e3" (UID: "353ca902-a15f-4bae-b353-48ae927810e3"). InnerVolumeSpecName "kube-api-access-cftn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.663078 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "353ca902-a15f-4bae-b353-48ae927810e3" (UID: "353ca902-a15f-4bae-b353-48ae927810e3"). InnerVolumeSpecName "serving-cert". 
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.759271 4840 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-client-ca\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.759536 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cftn9\" (UniqueName: \"kubernetes.io/projected/353ca902-a15f-4bae-b353-48ae927810e3-kube-api-access-cftn9\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.759605 4840 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/353ca902-a15f-4bae-b353-48ae927810e3-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.759670 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.759730 4840 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/353ca902-a15f-4bae-b353-48ae927810e3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.909136 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:34 crc kubenswrapper[4840]: I1209 17:02:34.914719 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-74577df4c5-tngqw"]
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.326903 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"]
Dec 09 17:02:35 crc kubenswrapper[4840]: E1209 17:02:35.327241 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="353ca902-a15f-4bae-b353-48ae927810e3" containerName="controller-manager"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.327265 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="353ca902-a15f-4bae-b353-48ae927810e3" containerName="controller-manager"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.327426 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="353ca902-a15f-4bae-b353-48ae927810e3" containerName="controller-manager"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.328015 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
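Each time a replacement pod is admitted, the same housekeeping trio repeats: cpu_manager logs RemoveStaleState, state_mem deletes the CPUSet assignment, and memory_manager drops its entry. The shared idea is pruning per-container resource state keyed by (podUID, containerName); a toy version of that pruning (the map layout and function are mine, not kubelet's):

package main

import "fmt"

// key identifies per-container resource state the way the managers above do.
type key struct{ podUID, container string }

// removeStaleState deletes any recorded assignment for a container that no
// longer exists, mirroring the RemoveStaleState / "Deleted CPUSet assignment" pairs.
func removeStaleState(assignments map[key]string, podUID, container string) {
	k := key{podUID, container}
	if _, ok := assignments[k]; ok {
		delete(assignments, k)
		fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", podUID, container)
	}
}

func main() {
	assignments := map[key]string{
		{"353ca902-a15f-4bae-b353-48ae927810e3", "controller-manager"}: "0-3",
	}
	removeStaleState(assignments, "353ca902-a15f-4bae-b353-48ae927810e3", "controller-manager")
}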
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.330654 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.330760 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.331469 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.334407 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.334635 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.335137 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.358278 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.365414 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"]
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.468322 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-proxy-ca-bundles\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.468454 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-config\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.468541 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-client-ca\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.468591 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f73f38-3e14-41f1-95c2-a4851cc0f621-serving-cert\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.468676 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hws4l\" (UniqueName: \"kubernetes.io/projected/c5f73f38-3e14-41f1-95c2-a4851cc0f621-kube-api-access-hws4l\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"
\"kubernetes.io/projected/c5f73f38-3e14-41f1-95c2-a4851cc0f621-kube-api-access-hws4l\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.570331 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f73f38-3e14-41f1-95c2-a4851cc0f621-serving-cert\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.570428 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-client-ca\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.570504 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hws4l\" (UniqueName: \"kubernetes.io/projected/c5f73f38-3e14-41f1-95c2-a4851cc0f621-kube-api-access-hws4l\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.570600 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-proxy-ca-bundles\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.570684 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-config\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.571736 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-client-ca\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.573303 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-config\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.573873 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c5f73f38-3e14-41f1-95c2-a4851cc0f621-proxy-ca-bundles\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " 
pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.576346 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f73f38-3e14-41f1-95c2-a4851cc0f621-serving-cert\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.594247 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hws4l\" (UniqueName: \"kubernetes.io/projected/c5f73f38-3e14-41f1-95c2-a4851cc0f621-kube-api-access-hws4l\") pod \"controller-manager-5f49b8c6f4-pslmp\" (UID: \"c5f73f38-3e14-41f1-95c2-a4851cc0f621\") " pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.664878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:35 crc kubenswrapper[4840]: I1209 17:02:35.882619 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp"] Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.591398 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" event={"ID":"c5f73f38-3e14-41f1-95c2-a4851cc0f621","Type":"ContainerStarted","Data":"8a65f810f58e7b71222fd8323097617fa70fa5b9a21743b3f13b9d76985c59e7"} Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.592776 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" event={"ID":"c5f73f38-3e14-41f1-95c2-a4851cc0f621","Type":"ContainerStarted","Data":"996f003ff8125d2f0c1e7df2c6cf1e15d4b903169e5bed99f3379d433ddb078d"} Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.592871 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.595763 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.615631 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="353ca902-a15f-4bae-b353-48ae927810e3" path="/var/lib/kubelet/pods/353ca902-a15f-4bae-b353-48ae927810e3/volumes" Dec 09 17:02:36 crc kubenswrapper[4840]: I1209 17:02:36.627640 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f49b8c6f4-pslmp" podStartSLOduration=2.627624472 podStartE2EDuration="2.627624472s" podCreationTimestamp="2025-12-09 17:02:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:02:36.606165069 +0000 UTC m=+342.597275702" watchObservedRunningTime="2025-12-09 17:02:36.627624472 +0000 UTC m=+342.618735105" Dec 09 17:02:51 crc kubenswrapper[4840]: I1209 17:02:51.989220 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dghwt"] Dec 09 17:02:51 crc kubenswrapper[4840]: I1209 17:02:51.990371 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.004197 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dghwt"] Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.077711 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15f2ed86-a75e-4355-b815-be259faf9c8b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.077876 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-certificates\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.077921 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-trusted-ca\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.077957 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k8wg\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-kube-api-access-2k8wg\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.078005 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15f2ed86-a75e-4355-b815-be259faf9c8b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.078034 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-tls\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.078062 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-bound-sa-token\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.078118 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.097387 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179180 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15f2ed86-a75e-4355-b815-be259faf9c8b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179231 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-tls\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179254 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-bound-sa-token\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179320 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15f2ed86-a75e-4355-b815-be259faf9c8b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179379 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-certificates\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179402 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-trusted-ca\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.179429 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k8wg\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-kube-api-access-2k8wg\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.180233 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/15f2ed86-a75e-4355-b815-be259faf9c8b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.181448 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-certificates\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.181495 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15f2ed86-a75e-4355-b815-be259faf9c8b-trusted-ca\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.187705 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/15f2ed86-a75e-4355-b815-be259faf9c8b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.187716 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-registry-tls\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.196214 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-bound-sa-token\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.199045 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k8wg\" (UniqueName: \"kubernetes.io/projected/15f2ed86-a75e-4355-b815-be259faf9c8b-kube-api-access-2k8wg\") pod \"image-registry-66df7c8f76-dghwt\" (UID: \"15f2ed86-a75e-4355-b815-be259faf9c8b\") " pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.309390 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:52 crc kubenswrapper[4840]: I1209 17:02:52.771804 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dghwt"] Dec 09 17:02:53 crc kubenswrapper[4840]: I1209 17:02:53.698676 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" event={"ID":"15f2ed86-a75e-4355-b815-be259faf9c8b","Type":"ContainerStarted","Data":"8e629e454fc577a76ebca8d6798ae78d2b1d98c3093371f0c7b1dfe508bb1c50"} Dec 09 17:02:53 crc kubenswrapper[4840]: I1209 17:02:53.698770 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" event={"ID":"15f2ed86-a75e-4355-b815-be259faf9c8b","Type":"ContainerStarted","Data":"5ceddcaee830a7f74602e9d71e439c759f8385ef86c15f77db48a890b480bb9a"} Dec 09 17:02:53 crc kubenswrapper[4840]: I1209 17:02:53.699342 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:02:53 crc kubenswrapper[4840]: I1209 17:02:53.724582 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" podStartSLOduration=2.724564806 podStartE2EDuration="2.724564806s" podCreationTimestamp="2025-12-09 17:02:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:02:53.722815605 +0000 UTC m=+359.713926248" watchObservedRunningTime="2025-12-09 17:02:53.724564806 +0000 UTC m=+359.715675449" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.297062 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.298909 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.302311 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.325230 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.415115 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v896\" (UniqueName: \"kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.415202 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.415280 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.501304 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"] Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.504576 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.507642 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.509637 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"] Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.517152 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v896\" (UniqueName: \"kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.517196 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.517231 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.518343 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.518919 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.539872 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v896\" (UniqueName: \"kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896\") pod \"community-operators-l9nvq\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.618355 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.618471 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content\") pod \"certified-operators-nlmbr\" (UID: 
\"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.618568 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wqlt\" (UniqueName: \"kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.627234 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.720414 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.721267 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.721309 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wqlt\" (UniqueName: \"kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.721543 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.723557 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.752011 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wqlt\" (UniqueName: \"kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt\") pod \"certified-operators-nlmbr\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") " pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:02 crc kubenswrapper[4840]: I1209 17:03:02.823466 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.013931 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.245025 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"] Dec 09 17:03:03 crc kubenswrapper[4840]: W1209 17:03:03.290692 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1285ea6e_8612_4b0f_adad_d93db6553569.slice/crio-e109f5c3dbe2bbc28ff0655786e76067831596d06b5f7f23f87b5df856b88082 WatchSource:0}: Error finding container e109f5c3dbe2bbc28ff0655786e76067831596d06b5f7f23f87b5df856b88082: Status 404 returned error can't find the container with id e109f5c3dbe2bbc28ff0655786e76067831596d06b5f7f23f87b5df856b88082 Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.762155 4840 generic.go:334] "Generic (PLEG): container finished" podID="1285ea6e-8612-4b0f-adad-d93db6553569" containerID="63714f8c3ade66a80909e91568beb450e00cf714be0ea6115aa0f70d021b25a1" exitCode=0 Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.762454 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerDied","Data":"63714f8c3ade66a80909e91568beb450e00cf714be0ea6115aa0f70d021b25a1"} Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.762547 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerStarted","Data":"e109f5c3dbe2bbc28ff0655786e76067831596d06b5f7f23f87b5df856b88082"} Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.764617 4840 generic.go:334] "Generic (PLEG): container finished" podID="263ae667-376a-4b0b-8509-0342fddb0392" containerID="05ad94fb2bbe77119d88d99910619b3fa29b497ef8dca659ab719772b7ad5330" exitCode=0 Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.764659 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerDied","Data":"05ad94fb2bbe77119d88d99910619b3fa29b497ef8dca659ab719772b7ad5330"} Dec 09 17:03:03 crc kubenswrapper[4840]: I1209 17:03:03.764696 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerStarted","Data":"b157a6fa25402082b66803486227dcb301eafbbed375eff9571f92458c971774"} Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.036396 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.036483 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:03:04 crc kubenswrapper[4840]: 
I1209 17:03:04.092264 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qpk4p"] Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.093179 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.099420 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.116161 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qpk4p"] Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.243811 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-utilities\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.244084 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzd29\" (UniqueName: \"kubernetes.io/projected/fffa1314-10f7-4218-9351-34f74565e0b9-kube-api-access-lzd29\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.244204 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-catalog-content\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.345137 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-utilities\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.345207 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzd29\" (UniqueName: \"kubernetes.io/projected/fffa1314-10f7-4218-9351-34f74565e0b9-kube-api-access-lzd29\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.345237 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-catalog-content\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.346208 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-catalog-content\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 
17:03:04.346275 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fffa1314-10f7-4218-9351-34f74565e0b9-utilities\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.364966 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzd29\" (UniqueName: \"kubernetes.io/projected/fffa1314-10f7-4218-9351-34f74565e0b9-kube-api-access-lzd29\") pod \"redhat-marketplace-qpk4p\" (UID: \"fffa1314-10f7-4218-9351-34f74565e0b9\") " pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.423667 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.777975 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerStarted","Data":"a8da44282bbc1991bbd3042d491e15afa757927b6652bb8e9962018a61e7cd2e"} Dec 09 17:03:04 crc kubenswrapper[4840]: I1209 17:03:04.817944 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qpk4p"] Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.093428 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g6b8g"] Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.095304 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.098284 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.114435 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g6b8g"] Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.261520 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgswj\" (UniqueName: \"kubernetes.io/projected/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-kube-api-access-tgswj\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.261821 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-catalog-content\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.261989 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-utilities\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.363348 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgswj\" (UniqueName: 
\"kubernetes.io/projected/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-kube-api-access-tgswj\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.363410 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-catalog-content\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.363511 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-utilities\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.364216 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-utilities\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.364379 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-catalog-content\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.389800 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgswj\" (UniqueName: \"kubernetes.io/projected/43bf6f9b-4624-4f32-828e-1ad2b7de2aa7-kube-api-access-tgswj\") pod \"redhat-operators-g6b8g\" (UID: \"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7\") " pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.427454 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.788290 4840 generic.go:334] "Generic (PLEG): container finished" podID="263ae667-376a-4b0b-8509-0342fddb0392" containerID="a8da44282bbc1991bbd3042d491e15afa757927b6652bb8e9962018a61e7cd2e" exitCode=0 Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.788327 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerDied","Data":"a8da44282bbc1991bbd3042d491e15afa757927b6652bb8e9962018a61e7cd2e"} Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.794329 4840 generic.go:334] "Generic (PLEG): container finished" podID="1285ea6e-8612-4b0f-adad-d93db6553569" containerID="9760c86ca8f9c83b7fd96fcce78a7c1dbb7c0fb70d0b51eb7ce84d8a8e4e3f86" exitCode=0 Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.794724 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerDied","Data":"9760c86ca8f9c83b7fd96fcce78a7c1dbb7c0fb70d0b51eb7ce84d8a8e4e3f86"} Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.798315 4840 generic.go:334] "Generic (PLEG): container finished" podID="fffa1314-10f7-4218-9351-34f74565e0b9" containerID="f0beabe3b7de3235072c968efe1158a38153b5db5191bd01b73b0170a8f99254" exitCode=0 Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.798358 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qpk4p" event={"ID":"fffa1314-10f7-4218-9351-34f74565e0b9","Type":"ContainerDied","Data":"f0beabe3b7de3235072c968efe1158a38153b5db5191bd01b73b0170a8f99254"} Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.798387 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qpk4p" event={"ID":"fffa1314-10f7-4218-9351-34f74565e0b9","Type":"ContainerStarted","Data":"ad1cbd5efbaa8f5d16831cdca1300c768fed0602e5b3965c773eac85d22f898b"} Dec 09 17:03:05 crc kubenswrapper[4840]: I1209 17:03:05.883876 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g6b8g"] Dec 09 17:03:05 crc kubenswrapper[4840]: W1209 17:03:05.889892 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43bf6f9b_4624_4f32_828e_1ad2b7de2aa7.slice/crio-c8a62225c3886104f270421f2f57a28321b3e02b8f7ddfcc6655e366fa42378c WatchSource:0}: Error finding container c8a62225c3886104f270421f2f57a28321b3e02b8f7ddfcc6655e366fa42378c: Status 404 returned error can't find the container with id c8a62225c3886104f270421f2f57a28321b3e02b8f7ddfcc6655e366fa42378c Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.807037 4840 generic.go:334] "Generic (PLEG): container finished" podID="43bf6f9b-4624-4f32-828e-1ad2b7de2aa7" containerID="8520597f2ca7d48408c2e0931112260dd5a0dc5d7c210833d44a231afe6183b9" exitCode=0 Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.807124 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6b8g" event={"ID":"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7","Type":"ContainerDied","Data":"8520597f2ca7d48408c2e0931112260dd5a0dc5d7c210833d44a231afe6183b9"} Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.807376 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-g6b8g" event={"ID":"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7","Type":"ContainerStarted","Data":"c8a62225c3886104f270421f2f57a28321b3e02b8f7ddfcc6655e366fa42378c"} Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.813808 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerStarted","Data":"4deaee9ca5268ef4d0cb4ebc3b7d69150edcc29cbbe8bc30b154566d1c24b5de"} Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.816834 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerStarted","Data":"a7c863e65f48387d426bba34f8371acabec4f51913e797cc98e4679cdbee24c0"} Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.851436 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nlmbr" podStartSLOduration=2.297029574 podStartE2EDuration="4.85141766s" podCreationTimestamp="2025-12-09 17:03:02 +0000 UTC" firstStartedPulling="2025-12-09 17:03:03.763985206 +0000 UTC m=+369.755095839" lastFinishedPulling="2025-12-09 17:03:06.318373292 +0000 UTC m=+372.309483925" observedRunningTime="2025-12-09 17:03:06.850695819 +0000 UTC m=+372.841806462" watchObservedRunningTime="2025-12-09 17:03:06.85141766 +0000 UTC m=+372.842528293" Dec 09 17:03:06 crc kubenswrapper[4840]: I1209 17:03:06.877139 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l9nvq" podStartSLOduration=2.258626913 podStartE2EDuration="4.877111248s" podCreationTimestamp="2025-12-09 17:03:02 +0000 UTC" firstStartedPulling="2025-12-09 17:03:03.766562842 +0000 UTC m=+369.757673485" lastFinishedPulling="2025-12-09 17:03:06.385047187 +0000 UTC m=+372.376157820" observedRunningTime="2025-12-09 17:03:06.866574497 +0000 UTC m=+372.857685130" watchObservedRunningTime="2025-12-09 17:03:06.877111248 +0000 UTC m=+372.868221881" Dec 09 17:03:07 crc kubenswrapper[4840]: I1209 17:03:07.823794 4840 generic.go:334] "Generic (PLEG): container finished" podID="fffa1314-10f7-4218-9351-34f74565e0b9" containerID="0154a6ea3ae931690bb2b4458a969d7ddfdf7de1c6fe69d098fb9d79ee2f3dcf" exitCode=0 Dec 09 17:03:07 crc kubenswrapper[4840]: I1209 17:03:07.823872 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qpk4p" event={"ID":"fffa1314-10f7-4218-9351-34f74565e0b9","Type":"ContainerDied","Data":"0154a6ea3ae931690bb2b4458a969d7ddfdf7de1c6fe69d098fb9d79ee2f3dcf"} Dec 09 17:03:07 crc kubenswrapper[4840]: I1209 17:03:07.826278 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6b8g" event={"ID":"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7","Type":"ContainerStarted","Data":"a090f193a1c12a3ad78a4e2b14eb2640a3f8215890973c17e6b3780078231b2c"} Dec 09 17:03:08 crc kubenswrapper[4840]: I1209 17:03:08.834755 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qpk4p" event={"ID":"fffa1314-10f7-4218-9351-34f74565e0b9","Type":"ContainerStarted","Data":"63ac93c2454ef18ae61830b1bb2564adc01e918c01c4534a5bd6c4e6177083b3"} Dec 09 17:03:08 crc kubenswrapper[4840]: I1209 17:03:08.838915 4840 generic.go:334] "Generic (PLEG): container finished" podID="43bf6f9b-4624-4f32-828e-1ad2b7de2aa7" 
containerID="a090f193a1c12a3ad78a4e2b14eb2640a3f8215890973c17e6b3780078231b2c" exitCode=0 Dec 09 17:03:08 crc kubenswrapper[4840]: I1209 17:03:08.838947 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6b8g" event={"ID":"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7","Type":"ContainerDied","Data":"a090f193a1c12a3ad78a4e2b14eb2640a3f8215890973c17e6b3780078231b2c"} Dec 09 17:03:08 crc kubenswrapper[4840]: I1209 17:03:08.865610 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qpk4p" podStartSLOduration=2.3374141330000002 podStartE2EDuration="4.865589665s" podCreationTimestamp="2025-12-09 17:03:04 +0000 UTC" firstStartedPulling="2025-12-09 17:03:05.801578872 +0000 UTC m=+371.792689535" lastFinishedPulling="2025-12-09 17:03:08.329754444 +0000 UTC m=+374.320865067" observedRunningTime="2025-12-09 17:03:08.862556896 +0000 UTC m=+374.853667539" watchObservedRunningTime="2025-12-09 17:03:08.865589665 +0000 UTC m=+374.856700298" Dec 09 17:03:09 crc kubenswrapper[4840]: I1209 17:03:09.849453 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6b8g" event={"ID":"43bf6f9b-4624-4f32-828e-1ad2b7de2aa7","Type":"ContainerStarted","Data":"edbba7554869064c767286bdae3ddfbbe42238790d5547a9197bd80640323ad3"} Dec 09 17:03:09 crc kubenswrapper[4840]: I1209 17:03:09.872681 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g6b8g" podStartSLOduration=2.233194588 podStartE2EDuration="4.872657733s" podCreationTimestamp="2025-12-09 17:03:05 +0000 UTC" firstStartedPulling="2025-12-09 17:03:06.808591187 +0000 UTC m=+372.799701820" lastFinishedPulling="2025-12-09 17:03:09.448054322 +0000 UTC m=+375.439164965" observedRunningTime="2025-12-09 17:03:09.869802469 +0000 UTC m=+375.860913132" watchObservedRunningTime="2025-12-09 17:03:09.872657733 +0000 UTC m=+375.863768396" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.316129 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-dghwt" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.384366 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"] Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.627393 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.627435 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.667727 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.824357 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.824424 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.901588 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:12 crc 
kubenswrapper[4840]: I1209 17:03:12.937454 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 17:03:12 crc kubenswrapper[4840]: I1209 17:03:12.961397 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nlmbr" Dec 09 17:03:14 crc kubenswrapper[4840]: I1209 17:03:14.423996 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:14 crc kubenswrapper[4840]: I1209 17:03:14.424296 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:14 crc kubenswrapper[4840]: I1209 17:03:14.469610 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:14 crc kubenswrapper[4840]: I1209 17:03:14.924040 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qpk4p" Dec 09 17:03:15 crc kubenswrapper[4840]: I1209 17:03:15.427906 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:15 crc kubenswrapper[4840]: I1209 17:03:15.427972 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:15 crc kubenswrapper[4840]: I1209 17:03:15.475193 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:15 crc kubenswrapper[4840]: I1209 17:03:15.921760 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g6b8g" Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.035701 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.036266 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.036306 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.036810 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.036855 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" 
containerID="cri-o://55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786" gracePeriod=600 Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.992799 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786" exitCode=0 Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.992867 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786"} Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.993190 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808"} Dec 09 17:03:34 crc kubenswrapper[4840]: I1209 17:03:34.993219 4840 scope.go:117] "RemoveContainer" containerID="38b555715456e7f1f774f656fce54ae1211d0116fccc42ec63c49c649353736c" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.421024 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" podUID="c648416a-e4c7-4ce4-97e5-33393cead15e" containerName="registry" containerID="cri-o://be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53" gracePeriod=30 Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.814459 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923018 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923177 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923212 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923242 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923261 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq6v6\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" 
(UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923284 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923310 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.923359 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token\") pod \"c648416a-e4c7-4ce4-97e5-33393cead15e\" (UID: \"c648416a-e4c7-4ce4-97e5-33393cead15e\") " Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.925524 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.925545 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.929846 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.933013 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.934436 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6" (OuterVolumeSpecName: "kube-api-access-pq6v6") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "kube-api-access-pq6v6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.934620 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.940948 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:03:37 crc kubenswrapper[4840]: I1209 17:03:37.947410 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "c648416a-e4c7-4ce4-97e5-33393cead15e" (UID: "c648416a-e4c7-4ce4-97e5-33393cead15e"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.013365 4840 generic.go:334] "Generic (PLEG): container finished" podID="c648416a-e4c7-4ce4-97e5-33393cead15e" containerID="be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53" exitCode=0 Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.013426 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" event={"ID":"c648416a-e4c7-4ce4-97e5-33393cead15e","Type":"ContainerDied","Data":"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53"} Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.013460 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" event={"ID":"c648416a-e4c7-4ce4-97e5-33393cead15e","Type":"ContainerDied","Data":"80f4c4336073bcafd279da52282a8e7db8f504efefafb31fe888081272f0dbd0"} Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.013483 4840 scope.go:117] "RemoveContainer" containerID="be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.014032 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bczl5" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.030401 4840 scope.go:117] "RemoveContainer" containerID="be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.030949 4840 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c648416a-e4c7-4ce4-97e5-33393cead15e-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031009 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031024 4840 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031036 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq6v6\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-kube-api-access-pq6v6\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031048 4840 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c648416a-e4c7-4ce4-97e5-33393cead15e-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031062 4840 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c648416a-e4c7-4ce4-97e5-33393cead15e-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031075 4840 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c648416a-e4c7-4ce4-97e5-33393cead15e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 17:03:38 crc kubenswrapper[4840]: E1209 17:03:38.031168 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53\": container with ID starting with be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53 not found: ID does not exist" containerID="be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.031203 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53"} err="failed to get container status \"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53\": rpc error: code = NotFound desc = could not find container \"be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53\": container with ID starting with be20cd97763c30f68dd96fea28bb98b4bb77ad0cb7f9d8f8cfa490373126df53 not found: ID does not exist" Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.047626 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"] Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.051697 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-image-registry/image-registry-697d97f7c8-bczl5"] Dec 09 17:03:38 crc kubenswrapper[4840]: I1209 17:03:38.614550 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c648416a-e4c7-4ce4-97e5-33393cead15e" path="/var/lib/kubelet/pods/c648416a-e4c7-4ce4-97e5-33393cead15e/volumes" Dec 09 17:05:34 crc kubenswrapper[4840]: I1209 17:05:34.037021 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:05:34 crc kubenswrapper[4840]: I1209 17:05:34.037646 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:06:04 crc kubenswrapper[4840]: I1209 17:06:04.036271 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:06:04 crc kubenswrapper[4840]: I1209 17:06:04.036855 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:06:34 crc kubenswrapper[4840]: I1209 17:06:34.036853 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:06:34 crc kubenswrapper[4840]: I1209 17:06:34.037555 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:06:34 crc kubenswrapper[4840]: I1209 17:06:34.037665 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:06:34 crc kubenswrapper[4840]: I1209 17:06:34.038662 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:06:34 crc kubenswrapper[4840]: I1209 17:06:34.038775 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" 
containerID="cri-o://09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808" gracePeriod=600 Dec 09 17:06:35 crc kubenswrapper[4840]: I1209 17:06:35.152441 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808" exitCode=0 Dec 09 17:06:35 crc kubenswrapper[4840]: I1209 17:06:35.152492 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808"} Dec 09 17:06:35 crc kubenswrapper[4840]: I1209 17:06:35.152942 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d"} Dec 09 17:06:35 crc kubenswrapper[4840]: I1209 17:06:35.152981 4840 scope.go:117] "RemoveContainer" containerID="55d6696c293e370b251a25d145325d574fbb55d04b257ef61902582c5658c786" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.468888 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82"] Dec 09 17:08:19 crc kubenswrapper[4840]: E1209 17:08:19.469699 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c648416a-e4c7-4ce4-97e5-33393cead15e" containerName="registry" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.469720 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c648416a-e4c7-4ce4-97e5-33393cead15e" containerName="registry" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.469847 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c648416a-e4c7-4ce4-97e5-33393cead15e" containerName="registry" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.470517 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.472709 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.482557 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82"] Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.582747 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.582866 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.582922 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdld8\" (UniqueName: \"kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.683901 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdld8\" (UniqueName: \"kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.684046 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.684149 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.684812 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.684812 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.706487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdld8\" (UniqueName: \"kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:19 crc kubenswrapper[4840]: I1209 17:08:19.791354 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:20 crc kubenswrapper[4840]: I1209 17:08:20.052812 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82"] Dec 09 17:08:20 crc kubenswrapper[4840]: I1209 17:08:20.875094 4840 generic.go:334] "Generic (PLEG): container finished" podID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerID="cd73ec9016109b2b8931fafa5f275d4eb003c31a42b55cc0ac62206649570186" exitCode=0 Dec 09 17:08:20 crc kubenswrapper[4840]: I1209 17:08:20.875432 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" event={"ID":"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6","Type":"ContainerDied","Data":"cd73ec9016109b2b8931fafa5f275d4eb003c31a42b55cc0ac62206649570186"} Dec 09 17:08:20 crc kubenswrapper[4840]: I1209 17:08:20.875465 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" event={"ID":"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6","Type":"ContainerStarted","Data":"85b653fdf3a56f381d231ba3ee290fa13b95b3f12f9c9a708b977aff1a6fc947"} Dec 09 17:08:20 crc kubenswrapper[4840]: I1209 17:08:20.877755 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:08:22 crc kubenswrapper[4840]: I1209 17:08:22.891271 4840 generic.go:334] "Generic (PLEG): container finished" podID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerID="ab4de22f6ae4d70491ec34ada9abbd3357741653ab6f9229ae2e7253701b5f57" exitCode=0 Dec 09 17:08:22 crc kubenswrapper[4840]: I1209 17:08:22.891451 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" event={"ID":"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6","Type":"ContainerDied","Data":"ab4de22f6ae4d70491ec34ada9abbd3357741653ab6f9229ae2e7253701b5f57"} Dec 09 17:08:23 crc kubenswrapper[4840]: I1209 17:08:23.900619 4840 generic.go:334] "Generic (PLEG): container finished" 
podID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerID="8a697ef4997c5dfb8756b585f0d71fb84d3cf80224dc26ad9ce1b3ddf17f6071" exitCode=0 Dec 09 17:08:23 crc kubenswrapper[4840]: I1209 17:08:23.900714 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" event={"ID":"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6","Type":"ContainerDied","Data":"8a697ef4997c5dfb8756b585f0d71fb84d3cf80224dc26ad9ce1b3ddf17f6071"} Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.215993 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.359762 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util\") pod \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.359961 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle\") pod \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.360034 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdld8\" (UniqueName: \"kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8\") pod \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\" (UID: \"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6\") " Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.364233 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle" (OuterVolumeSpecName: "bundle") pod "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" (UID: "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.365754 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8" (OuterVolumeSpecName: "kube-api-access-jdld8") pod "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" (UID: "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6"). InnerVolumeSpecName "kube-api-access-jdld8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.375455 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util" (OuterVolumeSpecName: "util") pod "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" (UID: "a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.461204 4840 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.461242 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdld8\" (UniqueName: \"kubernetes.io/projected/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-kube-api-access-jdld8\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.461257 4840 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6-util\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.917737 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" event={"ID":"a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6","Type":"ContainerDied","Data":"85b653fdf3a56f381d231ba3ee290fa13b95b3f12f9c9a708b977aff1a6fc947"} Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.917792 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85b653fdf3a56f381d231ba3ee290fa13b95b3f12f9c9a708b977aff1a6fc947" Dec 09 17:08:25 crc kubenswrapper[4840]: I1209 17:08:25.917869 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82" Dec 09 17:08:34 crc kubenswrapper[4840]: I1209 17:08:34.036097 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:08:34 crc kubenswrapper[4840]: I1209 17:08:34.036620 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480184 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"] Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480672 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="pull" Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480687 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="pull" Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480701 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="extract" Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480708 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="extract" Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480721 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="util" Dec 09 17:08:37 crc 
Dec 09 17:08:34 crc kubenswrapper[4840]: I1209 17:08:34.036097 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:08:34 crc kubenswrapper[4840]: I1209 17:08:34.036620 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480184 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"]
Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480672 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="pull"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480687 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="pull"
Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480701 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="extract"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480708 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="extract"
Dec 09 17:08:37 crc kubenswrapper[4840]: E1209 17:08:37.480721 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="util"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480728 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="util"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.480835 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6" containerName="extract"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.481409 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.483675 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.483923 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.484362 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-97cg9"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.499491 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.608951 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.609752 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.611797 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.614902 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-jnvzc"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.620865 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rglvq\" (UniqueName: \"kubernetes.io/projected/251a8643-2e5b-4ac3-997d-b275bb1f6d25-kube-api-access-rglvq\") pod \"obo-prometheus-operator-668cf9dfbb-qdpq8\" (UID: \"251a8643-2e5b-4ac3-997d-b275bb1f6d25\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.624813 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.629127 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.629904 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.638536 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.721775 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.722070 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.722262 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rglvq\" (UniqueName: \"kubernetes.io/projected/251a8643-2e5b-4ac3-997d-b275bb1f6d25-kube-api-access-rglvq\") pod \"obo-prometheus-operator-668cf9dfbb-qdpq8\" (UID: \"251a8643-2e5b-4ac3-997d-b275bb1f6d25\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.740190 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rglvq\" (UniqueName: \"kubernetes.io/projected/251a8643-2e5b-4ac3-997d-b275bb1f6d25-kube-api-access-rglvq\") pod \"obo-prometheus-operator-668cf9dfbb-qdpq8\" (UID: \"251a8643-2e5b-4ac3-997d-b275bb1f6d25\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.800194 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.827600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.827740 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.827782 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.827822 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.832567 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.846572 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e62864a-4c1d-4543-a379-ce30d0e68ea6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j\" (UID: \"1e62864a-4c1d-4543-a379-ce30d0e68ea6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.849106 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-sqt7j"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.849752 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.852404 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.852612 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-hv4tb"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.861076 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-sqt7j"]
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.928600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.929073 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.929308 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.929364 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.929383 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs4vs\" (UniqueName: \"kubernetes.io/projected/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-kube-api-access-xs4vs\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.938492 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:37 crc kubenswrapper[4840]: I1209 17:08:37.949594 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9b0db50-828e-4e51-9ea5-099a055f6c0f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz\" (UID: \"c9b0db50-828e-4e51-9ea5-099a055f6c0f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.021858 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-wc2mq"]
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.022508 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.026556 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-24gz5"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.030752 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/06d88094-c4e9-4872-a546-6c42f9626286-openshift-service-ca\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.030807 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs4vs\" (UniqueName: \"kubernetes.io/projected/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-kube-api-access-xs4vs\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.030849 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.030885 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpcvl\" (UniqueName: \"kubernetes.io/projected/06d88094-c4e9-4872-a546-6c42f9626286-kube-api-access-dpcvl\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.036431 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.051351 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-wc2mq"]
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.063033 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs4vs\" (UniqueName: \"kubernetes.io/projected/ddf0a1fc-865e-44d5-b4e9-f470cbcbc031-kube-api-access-xs4vs\") pod \"observability-operator-d8bb48f5d-sqt7j\" (UID: \"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031\") " pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.088477 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8"]
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.131460 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/06d88094-c4e9-4872-a546-6c42f9626286-openshift-service-ca\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.131522 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpcvl\" (UniqueName: \"kubernetes.io/projected/06d88094-c4e9-4872-a546-6c42f9626286-kube-api-access-dpcvl\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.132722 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/06d88094-c4e9-4872-a546-6c42f9626286-openshift-service-ca\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.149551 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpcvl\" (UniqueName: \"kubernetes.io/projected/06d88094-c4e9-4872-a546-6c42f9626286-kube-api-access-dpcvl\") pod \"perses-operator-5446b9c989-wc2mq\" (UID: \"06d88094-c4e9-4872-a546-6c42f9626286\") " pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.196648 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.239219 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j"]
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.253256 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"
Dec 09 17:08:38 crc kubenswrapper[4840]: W1209 17:08:38.309764 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e62864a_4c1d_4543_a379_ce30d0e68ea6.slice/crio-2625f9d094697154aac4ed4bf6f03836001410c5ba41d45d668a8f5976feca7f WatchSource:0}: Error finding container 2625f9d094697154aac4ed4bf6f03836001410c5ba41d45d668a8f5976feca7f: Status 404 returned error can't find the container with id 2625f9d094697154aac4ed4bf6f03836001410c5ba41d45d668a8f5976feca7f
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.344226 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-wc2mq"
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.510307 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-sqt7j"]
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.620278 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-wc2mq"]
Dec 09 17:08:38 crc kubenswrapper[4840]: W1209 17:08:38.625794 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06d88094_c4e9_4872_a546_6c42f9626286.slice/crio-3fc43e1613de1b72f334c6e0c24524cdb9a579ad7f5bf5167264074c79bb588b WatchSource:0}: Error finding container 3fc43e1613de1b72f334c6e0c24524cdb9a579ad7f5bf5167264074c79bb588b: Status 404 returned error can't find the container with id 3fc43e1613de1b72f334c6e0c24524cdb9a579ad7f5bf5167264074c79bb588b
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.802166 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz"]
Dec 09 17:08:38 crc kubenswrapper[4840]: W1209 17:08:38.814319 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9b0db50_828e_4e51_9ea5_099a055f6c0f.slice/crio-b6e509649f0429a0d716af30203c5d35aef6cb57cb2991fce6d63de24088fdd3 WatchSource:0}: Error finding container b6e509649f0429a0d716af30203c5d35aef6cb57cb2991fce6d63de24088fdd3: Status 404 returned error can't find the container with id b6e509649f0429a0d716af30203c5d35aef6cb57cb2991fce6d63de24088fdd3
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.998537 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j" event={"ID":"1e62864a-4c1d-4543-a379-ce30d0e68ea6","Type":"ContainerStarted","Data":"2625f9d094697154aac4ed4bf6f03836001410c5ba41d45d668a8f5976feca7f"}
Dec 09 17:08:38 crc kubenswrapper[4840]: I1209 17:08:38.999792 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz" event={"ID":"c9b0db50-828e-4e51-9ea5-099a055f6c0f","Type":"ContainerStarted","Data":"b6e509649f0429a0d716af30203c5d35aef6cb57cb2991fce6d63de24088fdd3"}
Dec 09 17:08:39 crc kubenswrapper[4840]: I1209 17:08:39.000689 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" event={"ID":"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031","Type":"ContainerStarted","Data":"3e83f10b6e6d2937ad6dfc42e3e2c9fcd654b2affebd69e92a9d2e17ecc6601d"}
Dec 09 17:08:39 crc kubenswrapper[4840]: I1209 17:08:39.001592 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8" event={"ID":"251a8643-2e5b-4ac3-997d-b275bb1f6d25","Type":"ContainerStarted","Data":"3dbd87b4a6c48b9589a766de330e1abf1b20f39f7c694de9c8c8ced66b92b9a1"}
Dec 09 17:08:39 crc kubenswrapper[4840]: I1209 17:08:39.002374 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-wc2mq" event={"ID":"06d88094-c4e9-4872-a546-6c42f9626286","Type":"ContainerStarted","Data":"3fc43e1613de1b72f334c6e0c24524cdb9a579ad7f5bf5167264074c79bb588b"}
pods=["openshift-ovn-kubernetes/ovnkube-node-lpfl9"] Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185306 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-controller" containerID="cri-o://2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185364 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="northd" containerID="cri-o://5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185384 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-node" containerID="cri-o://85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185423 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-acl-logging" containerID="cri-o://dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185512 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185553 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="sbdb" containerID="cri-o://2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.185591 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="nbdb" containerID="cri-o://5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" gracePeriod=30 Dec 09 17:08:45 crc kubenswrapper[4840]: I1209 17:08:45.226086 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller" containerID="cri-o://0b264648af59c3553deaa61e50dd6d64c709043b7d9b08b6cb4a2bb87a665604" gracePeriod=30 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.053298 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/2.log" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.054210 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/1.log" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.054254 4840 generic.go:334] "Generic (PLEG): container finished" 
podID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" containerID="f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315" exitCode=2 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.054280 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerDied","Data":"f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.054331 4840 scope.go:117] "RemoveContainer" containerID="cf116e74f5c75d356cca263037e2a3f3691e52068bed58871445e273a1092786" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.054874 4840 scope.go:117] "RemoveContainer" containerID="f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315" Dec 09 17:08:46 crc kubenswrapper[4840]: E1209 17:08:46.055239 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-n2cr9_openshift-multus(9c465ec1-5011-46d7-bcf3-df79d8b4543b)\"" pod="openshift-multus/multus-n2cr9" podUID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.058796 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovnkube-controller/3.log" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.061901 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-acl-logging/0.log" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.062671 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-controller/0.log" Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063235 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="0b264648af59c3553deaa61e50dd6d64c709043b7d9b08b6cb4a2bb87a665604" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063265 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063277 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063286 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063294 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063303 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba" exitCode=0 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063302 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"0b264648af59c3553deaa61e50dd6d64c709043b7d9b08b6cb4a2bb87a665604"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063345 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063364 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063379 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063312 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5" exitCode=143 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063430 4840 generic.go:334] "Generic (PLEG): container finished" podID="33826d17-3660-4069-b173-accfbe7e24b3" containerID="2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c" exitCode=143 Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063392 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063487 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063506 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5"} Dec 09 17:08:46 crc kubenswrapper[4840]: I1209 17:08:46.063521 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c"} Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.834168 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c is running failed: container process not found" containerID="2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.834191 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096 is running failed: container process not found" containerID="5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835040 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096 is running failed: container process not found" containerID="5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835208 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c is running failed: container process not found" containerID="2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835532 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096 is running failed: container process not found" containerID="5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835563 4840 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="nbdb" Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835619 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c is running failed: container process not found" containerID="2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"]
Dec 09 17:08:48 crc kubenswrapper[4840]: E1209 17:08:48.835633 4840 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="sbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.743853 4840 scope.go:117] "RemoveContainer" containerID="1a6fb10cb6e6690411d4c1ad92ae3018a939a5824c0a9419848b2daa40267852"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.787476 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-acl-logging/0.log"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.788008 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-controller/0.log"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.788419 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.848853 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fkrtv"]
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849084 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849097 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849105 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849111 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849121 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849128 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849135 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-acl-logging"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849141 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-acl-logging"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849150 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849156 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849164 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849170 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849182 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="northd"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849188 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="northd"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849197 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-node"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849203 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-node"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849213 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="nbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849218 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="nbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849227 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="sbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849232 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="sbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849240 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kubecfg-setup"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849246 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kubecfg-setup"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849364 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849378 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849388 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849393 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849402 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="nbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849411 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="kube-rbac-proxy-node"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849418 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849424 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="sbdb"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849431 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="northd"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849439 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovn-acl-logging"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849582 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849590 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849673 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849680 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: E1209 17:08:49.849771 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.849778 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="33826d17-3660-4069-b173-accfbe7e24b3" containerName="ovnkube-controller"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.851660 4840 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv"
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895200 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895247 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895273 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895299 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895324 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895343 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895356 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895370 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895390 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gm99\" (UniqueName: \"kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895412 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895434 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895462 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895476 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895499 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895526 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895540 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895557 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895579 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895603 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") "
Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895620 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName:
\"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch\") pod \"33826d17-3660-4069-b173-accfbe7e24b3\" (UID: \"33826d17-3660-4069-b173-accfbe7e24b3\") " Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.895811 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.896090 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash" (OuterVolumeSpecName: "host-slash") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.896118 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.896138 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket" (OuterVolumeSpecName: "log-socket") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897168 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897202 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897222 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897241 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897260 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.897276 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898055 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898111 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898404 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898437 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898457 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898477 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log" (OuterVolumeSpecName: "node-log") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.898694 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.906956 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99" (OuterVolumeSpecName: "kube-api-access-5gm99") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "kube-api-access-5gm99". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.906951 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.921750 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "33826d17-3660-4069-b173-accfbe7e24b3" (UID: "33826d17-3660-4069-b173-accfbe7e24b3"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996582 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-log-socket\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996618 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-script-lib\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996639 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-slash\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996662 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-netns\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996683 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-kubelet\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996709 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-env-overrides\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996737 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-var-lib-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996753 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-config\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996776 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-ovn\") pod 
\"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996833 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-netd\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996853 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovn-node-metrics-cert\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996871 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-node-log\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996889 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ng7f\" (UniqueName: \"kubernetes.io/projected/fe3b5a47-20ec-4204-8e80-6659c1197e92-kube-api-access-9ng7f\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996904 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-etc-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996918 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.996936 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-bin\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997025 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-systemd\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997095 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997143 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997167 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-systemd-units\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997261 4840 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997276 4840 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997286 4840 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997299 4840 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997311 4840 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997321 4840 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997331 4840 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997340 4840 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997351 4840 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 
17:08:49.997362 4840 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-log-socket\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997373 4840 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33826d17-3660-4069-b173-accfbe7e24b3-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997386 4840 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997396 4840 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33826d17-3660-4069-b173-accfbe7e24b3-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997407 4840 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997417 4840 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-node-log\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997428 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gm99\" (UniqueName: \"kubernetes.io/projected/33826d17-3660-4069-b173-accfbe7e24b3-kube-api-access-5gm99\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997438 4840 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-slash\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997448 4840 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997461 4840 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:49 crc kubenswrapper[4840]: I1209 17:08:49.997473 4840 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33826d17-3660-4069-b173-accfbe7e24b3-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098784 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098838 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-systemd-units\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098854 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098872 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-log-socket\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098886 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-script-lib\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098901 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-slash\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098919 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-netns\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098937 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-kubelet\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098959 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-env-overrides\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.098990 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-var-lib-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099008 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-config\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099029 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-ovn\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099047 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-netd\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099065 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovn-node-metrics-cert\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099079 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-node-log\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099097 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ng7f\" (UniqueName: \"kubernetes.io/projected/fe3b5a47-20ec-4204-8e80-6659c1197e92-kube-api-access-9ng7f\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099110 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-etc-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099124 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099139 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-bin\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099156 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-systemd\") pod 
\"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099216 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-systemd\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099247 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099268 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-systemd-units\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099287 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099306 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-log-socket\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099942 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-script-lib\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.099995 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-slash\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100016 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-netns\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100036 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-kubelet\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 
17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100357 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-env-overrides\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100405 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-var-lib-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100820 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovnkube-config\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100867 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-run-ovn\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.100888 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-netd\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.101496 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-etc-openvswitch\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.101533 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-node-log\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.101727 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-run-ovn-kubernetes\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.101757 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fe3b5a47-20ec-4204-8e80-6659c1197e92-host-cni-bin\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.103549 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/fe3b5a47-20ec-4204-8e80-6659c1197e92-ovn-node-metrics-cert\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.109285 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8" event={"ID":"251a8643-2e5b-4ac3-997d-b275bb1f6d25","Type":"ContainerStarted","Data":"f913c97d2edbbe71969bced679630a6c377bb37d538d96c4a54b6a3e15d547c8"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.110925 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/2.log" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.112370 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-wc2mq" event={"ID":"06d88094-c4e9-4872-a546-6c42f9626286","Type":"ContainerStarted","Data":"b9602c006206f737019841a48873c7209f20aaa4b02bf02c557d3bff8dfd88bd"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.113099 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-wc2mq" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.117959 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-acl-logging/0.log" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.118446 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lpfl9_33826d17-3660-4069-b173-accfbe7e24b3/ovn-controller/0.log" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.118810 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" event={"ID":"33826d17-3660-4069-b173-accfbe7e24b3","Type":"ContainerDied","Data":"e472c683ad5cc7d154b1aa609eb0139e4516abe37b911019e88bd9286833f925"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.118896 4840 scope.go:117] "RemoveContainer" containerID="0b264648af59c3553deaa61e50dd6d64c709043b7d9b08b6cb4a2bb87a665604" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.119018 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lpfl9" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.125700 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j" event={"ID":"1e62864a-4c1d-4543-a379-ce30d0e68ea6","Type":"ContainerStarted","Data":"f3cc651b0e7e90386a5496c611ec4f21d5f9bd498750434b4f09bca698ed94e3"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.126884 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ng7f\" (UniqueName: \"kubernetes.io/projected/fe3b5a47-20ec-4204-8e80-6659c1197e92-kube-api-access-9ng7f\") pod \"ovnkube-node-fkrtv\" (UID: \"fe3b5a47-20ec-4204-8e80-6659c1197e92\") " pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.128853 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz" event={"ID":"c9b0db50-828e-4e51-9ea5-099a055f6c0f","Type":"ContainerStarted","Data":"25df6feeebb3a321075992ee5f45adbf74b588b616b05fc5918ed41dd2eb75f4"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.130556 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" event={"ID":"ddf0a1fc-865e-44d5-b4e9-f470cbcbc031","Type":"ContainerStarted","Data":"92ca1a8d7e8569006dedcb6c2859ffe1d98b66a401166bae846e60e4c9615f8b"} Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.131594 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.132820 4840 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-sqt7j container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.72:8081/healthz\": dial tcp 10.217.0.72:8081: connect: connection refused" start-of-body= Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.132855 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" podUID="ddf0a1fc-865e-44d5-b4e9-f470cbcbc031" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.72:8081/healthz\": dial tcp 10.217.0.72:8081: connect: connection refused" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.140527 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-wc2mq" podStartSLOduration=1.004751102 podStartE2EDuration="12.140512179s" podCreationTimestamp="2025-12-09 17:08:38 +0000 UTC" firstStartedPulling="2025-12-09 17:08:38.627974047 +0000 UTC m=+704.619084670" lastFinishedPulling="2025-12-09 17:08:49.763735094 +0000 UTC m=+715.754845747" observedRunningTime="2025-12-09 17:08:50.136605752 +0000 UTC m=+716.127716405" watchObservedRunningTime="2025-12-09 17:08:50.140512179 +0000 UTC m=+716.131622812" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.148624 4840 scope.go:117] "RemoveContainer" containerID="2ac5bf002d96c47dbab436f22b4bdd2efbb9b2560cf8bdf190ee820022c6ca8c" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.163706 4840 scope.go:117] "RemoveContainer" containerID="5ca5752b3822076d62344ad37e54bfefddd1445259b26a71b61e86c946427096" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.171029 4840 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" podStartSLOduration=1.882657825 podStartE2EDuration="13.171010886s" podCreationTimestamp="2025-12-09 17:08:37 +0000 UTC" firstStartedPulling="2025-12-09 17:08:38.529847108 +0000 UTC m=+704.520957741" lastFinishedPulling="2025-12-09 17:08:49.818200169 +0000 UTC m=+715.809310802" observedRunningTime="2025-12-09 17:08:50.158646497 +0000 UTC m=+716.149757130" watchObservedRunningTime="2025-12-09 17:08:50.171010886 +0000 UTC m=+716.162121519" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.174373 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz" podStartSLOduration=2.253517888 podStartE2EDuration="13.174357368s" podCreationTimestamp="2025-12-09 17:08:37 +0000 UTC" firstStartedPulling="2025-12-09 17:08:38.826674018 +0000 UTC m=+704.817784651" lastFinishedPulling="2025-12-09 17:08:49.747513498 +0000 UTC m=+715.738624131" observedRunningTime="2025-12-09 17:08:50.170162683 +0000 UTC m=+716.161273336" watchObservedRunningTime="2025-12-09 17:08:50.174357368 +0000 UTC m=+716.165468001" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.186413 4840 scope.go:117] "RemoveContainer" containerID="5f91847dccf2ee4753adf8ee2cf3d20423b8c2911ce744d8f0087a510d892861" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.191112 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.208349 4840 scope.go:117] "RemoveContainer" containerID="9fdbbe0d45e8ff27c3063b48899282eeec86bac9a8d1319dedff3de37e1dffaa" Dec 09 17:08:50 crc kubenswrapper[4840]: W1209 17:08:50.220514 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe3b5a47_20ec_4204_8e80_6659c1197e92.slice/crio-7fdadb23d53c0ddb348d267587a7a5b7959c855503c69fa3b971a358ead6b9a2 WatchSource:0}: Error finding container 7fdadb23d53c0ddb348d267587a7a5b7959c855503c69fa3b971a358ead6b9a2: Status 404 returned error can't find the container with id 7fdadb23d53c0ddb348d267587a7a5b7959c855503c69fa3b971a358ead6b9a2 Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.231802 4840 scope.go:117] "RemoveContainer" containerID="85810b71b18f92108ceca824ee4aa7716d13cc064cee17456d508744732c00ba" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.236711 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j" podStartSLOduration=1.7878885279999999 podStartE2EDuration="13.2366916s" podCreationTimestamp="2025-12-09 17:08:37 +0000 UTC" firstStartedPulling="2025-12-09 17:08:38.31269647 +0000 UTC m=+704.303807103" lastFinishedPulling="2025-12-09 17:08:49.761499542 +0000 UTC m=+715.752610175" observedRunningTime="2025-12-09 17:08:50.236107814 +0000 UTC m=+716.227218467" watchObservedRunningTime="2025-12-09 17:08:50.2366916 +0000 UTC m=+716.227802233" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.280760 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lpfl9"] Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.282416 4840 scope.go:117] "RemoveContainer" containerID="dc9c69f9e7d20b59415742d43d6bef10956bc6092d175ace82b8c52f7f3f43e5" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.286940 4840 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lpfl9"] Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.318129 4840 scope.go:117] "RemoveContainer" containerID="2b7ad6699f3a51d77f5ffdf8558fcd4ac8a067a09bf3ffe83155af4620c1114c" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.357257 4840 scope.go:117] "RemoveContainer" containerID="f006de3e543c40300a3d4b71efc5894e68be14b73aa70dc2f8f2d2273325c7ca" Dec 09 17:08:50 crc kubenswrapper[4840]: I1209 17:08:50.617372 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33826d17-3660-4069-b173-accfbe7e24b3" path="/var/lib/kubelet/pods/33826d17-3660-4069-b173-accfbe7e24b3/volumes" Dec 09 17:08:51 crc kubenswrapper[4840]: I1209 17:08:51.137660 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe3b5a47-20ec-4204-8e80-6659c1197e92" containerID="89278efc3e424d34f62330788a14ba14ce12af7b03369e29fcaab141cd538d14" exitCode=0 Dec 09 17:08:51 crc kubenswrapper[4840]: I1209 17:08:51.138488 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerDied","Data":"89278efc3e424d34f62330788a14ba14ce12af7b03369e29fcaab141cd538d14"} Dec 09 17:08:51 crc kubenswrapper[4840]: I1209 17:08:51.138517 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"7fdadb23d53c0ddb348d267587a7a5b7959c855503c69fa3b971a358ead6b9a2"} Dec 09 17:08:51 crc kubenswrapper[4840]: I1209 17:08:51.141671 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-sqt7j" Dec 09 17:08:51 crc kubenswrapper[4840]: I1209 17:08:51.194759 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qdpq8" podStartSLOduration=2.491721298 podStartE2EDuration="14.194734478s" podCreationTimestamp="2025-12-09 17:08:37 +0000 UTC" firstStartedPulling="2025-12-09 17:08:38.099414096 +0000 UTC m=+704.090524729" lastFinishedPulling="2025-12-09 17:08:49.802427276 +0000 UTC m=+715.793537909" observedRunningTime="2025-12-09 17:08:51.193820133 +0000 UTC m=+717.184930766" watchObservedRunningTime="2025-12-09 17:08:51.194734478 +0000 UTC m=+717.185845121" Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149065 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"27ebfa77add49534a7c0a76ad0790f4b7842431f0f3b598faae6dd088adb27fa"} Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149388 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"1004c83b5ff526003a92b53bb687a5d5ff5076398cf5b62c8f76a774e367450b"} Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149403 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"d25a32314eb50b418248e247d8a704f43fc2b355d80b1da68a3409223cb15836"} Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149413 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" 
event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"4936d6ee948a1168ab1bfa17c7833c24396015b2ee9563077ee643ae5714efb2"} Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149423 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"1f1f489a08ef000a5e2a394bcced8b1c4e4ef38c9c59765b1fc0eb78f991c25f"} Dec 09 17:08:52 crc kubenswrapper[4840]: I1209 17:08:52.149432 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"0be286a3e87d7b809918f456dee1df79f4f1f3e2737ed79f712993da58302d56"} Dec 09 17:08:54 crc kubenswrapper[4840]: I1209 17:08:54.172683 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"0f6e3869b668efb1ae4c44e30ccd1b760759fa650ef052d5287e2a3e12d25832"} Dec 09 17:08:57 crc kubenswrapper[4840]: I1209 17:08:57.189394 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" event={"ID":"fe3b5a47-20ec-4204-8e80-6659c1197e92","Type":"ContainerStarted","Data":"92b6a6f424ada1f98e17ea0897f0ccde9241facf4d024da1537e225ece3acbbe"} Dec 09 17:08:57 crc kubenswrapper[4840]: I1209 17:08:57.189889 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:57 crc kubenswrapper[4840]: I1209 17:08:57.189904 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:57 crc kubenswrapper[4840]: I1209 17:08:57.218092 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:57 crc kubenswrapper[4840]: I1209 17:08:57.231685 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" podStartSLOduration=8.231659059 podStartE2EDuration="8.231659059s" podCreationTimestamp="2025-12-09 17:08:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:08:57.227571856 +0000 UTC m=+723.218682489" watchObservedRunningTime="2025-12-09 17:08:57.231659059 +0000 UTC m=+723.222769692" Dec 09 17:08:58 crc kubenswrapper[4840]: I1209 17:08:58.196679 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:58 crc kubenswrapper[4840]: I1209 17:08:58.271378 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:08:58 crc kubenswrapper[4840]: I1209 17:08:58.353708 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-wc2mq" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.061637 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6wmh8"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.062856 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.065054 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.066037 4840 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-8bvz8" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.066045 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.072462 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-rvq4r"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.073140 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.074825 4840 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-flpmf" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.082753 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6wmh8"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.091460 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-n9tqx"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.092535 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.096505 4840 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lh4zn" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.099212 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-rvq4r"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.108847 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-n9tqx"] Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.239824 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4krf\" (UniqueName: \"kubernetes.io/projected/f7a8b3dc-71ea-45c4-9699-4c7194a5d90f-kube-api-access-x4krf\") pod \"cert-manager-webhook-5655c58dd6-n9tqx\" (UID: \"f7a8b3dc-71ea-45c4-9699-4c7194a5d90f\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.239924 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t55h5\" (UniqueName: \"kubernetes.io/projected/96234cfc-2b1f-4a48-8631-c22932a7129e-kube-api-access-t55h5\") pod \"cert-manager-cainjector-7f985d654d-rvq4r\" (UID: \"96234cfc-2b1f-4a48-8631-c22932a7129e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.239944 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbv7l\" (UniqueName: \"kubernetes.io/projected/a56be419-62f5-4c5e-87dc-11c097f51918-kube-api-access-rbv7l\") pod \"cert-manager-5b446d88c5-6wmh8\" (UID: \"a56be419-62f5-4c5e-87dc-11c097f51918\") " pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 
09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.341188 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t55h5\" (UniqueName: \"kubernetes.io/projected/96234cfc-2b1f-4a48-8631-c22932a7129e-kube-api-access-t55h5\") pod \"cert-manager-cainjector-7f985d654d-rvq4r\" (UID: \"96234cfc-2b1f-4a48-8631-c22932a7129e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.341435 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbv7l\" (UniqueName: \"kubernetes.io/projected/a56be419-62f5-4c5e-87dc-11c097f51918-kube-api-access-rbv7l\") pod \"cert-manager-5b446d88c5-6wmh8\" (UID: \"a56be419-62f5-4c5e-87dc-11c097f51918\") " pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.341604 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4krf\" (UniqueName: \"kubernetes.io/projected/f7a8b3dc-71ea-45c4-9699-4c7194a5d90f-kube-api-access-x4krf\") pod \"cert-manager-webhook-5655c58dd6-n9tqx\" (UID: \"f7a8b3dc-71ea-45c4-9699-4c7194a5d90f\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.366914 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbv7l\" (UniqueName: \"kubernetes.io/projected/a56be419-62f5-4c5e-87dc-11c097f51918-kube-api-access-rbv7l\") pod \"cert-manager-5b446d88c5-6wmh8\" (UID: \"a56be419-62f5-4c5e-87dc-11c097f51918\") " pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.368948 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t55h5\" (UniqueName: \"kubernetes.io/projected/96234cfc-2b1f-4a48-8631-c22932a7129e-kube-api-access-t55h5\") pod \"cert-manager-cainjector-7f985d654d-rvq4r\" (UID: \"96234cfc-2b1f-4a48-8631-c22932a7129e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.370433 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4krf\" (UniqueName: \"kubernetes.io/projected/f7a8b3dc-71ea-45c4-9699-4c7194a5d90f-kube-api-access-x4krf\") pod \"cert-manager-webhook-5655c58dd6-n9tqx\" (UID: \"f7a8b3dc-71ea-45c4-9699-4c7194a5d90f\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.386291 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.395056 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: I1209 17:08:59.419129 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.473134 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(6c060a556c3162ebae59890be51dd67e8c8964f6665c17ac3ded7789eb06a602): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.473216 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(6c060a556c3162ebae59890be51dd67e8c8964f6665c17ac3ded7789eb06a602): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.473244 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(6c060a556c3162ebae59890be51dd67e8c8964f6665c17ac3ded7789eb06a602): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.473294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(6c060a556c3162ebae59890be51dd67e8c8964f6665c17ac3ded7789eb06a602): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" podUID="96234cfc-2b1f-4a48-8631-c22932a7129e" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.484895 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(1ed85fe2bb4c04d2774c2019d72005a61af9d43dd1e5dd1d7b0117633ab5990f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.484990 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(1ed85fe2bb4c04d2774c2019d72005a61af9d43dd1e5dd1d7b0117633ab5990f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.485021 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(1ed85fe2bb4c04d2774c2019d72005a61af9d43dd1e5dd1d7b0117633ab5990f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.485079 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(1ed85fe2bb4c04d2774c2019d72005a61af9d43dd1e5dd1d7b0117633ab5990f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" podUID="f7a8b3dc-71ea-45c4-9699-4c7194a5d90f" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.493179 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(2f1ca65a509baad1205df3f61875ac1d4a25435ea5ba426ab0694a207912aa4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.493249 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(2f1ca65a509baad1205df3f61875ac1d4a25435ea5ba426ab0694a207912aa4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.493278 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(2f1ca65a509baad1205df3f61875ac1d4a25435ea5ba426ab0694a207912aa4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:08:59 crc kubenswrapper[4840]: E1209 17:08:59.493333 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(2f1ca65a509baad1205df3f61875ac1d4a25435ea5ba426ab0694a207912aa4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" podUID="a56be419-62f5-4c5e-87dc-11c097f51918" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.209558 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.209590 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.209596 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.210064 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.210068 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:00 crc kubenswrapper[4840]: I1209 17:09:00.210074 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.274192 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(5b3f10a98a216651e6d23f21143876e7344dd350bee06bdcc730f5fa09bc7cba): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.274254 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(5b3f10a98a216651e6d23f21143876e7344dd350bee06bdcc730f5fa09bc7cba): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.274274 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(5b3f10a98a216651e6d23f21143876e7344dd350bee06bdcc730f5fa09bc7cba): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.274317 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(5b3f10a98a216651e6d23f21143876e7344dd350bee06bdcc730f5fa09bc7cba): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" podUID="a56be419-62f5-4c5e-87dc-11c097f51918" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.276946 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(646ba931cae95f128b165b43a93dde7f2972f1f86dc29c8b0d7d64b064e6586d): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.277036 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(646ba931cae95f128b165b43a93dde7f2972f1f86dc29c8b0d7d64b064e6586d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.277058 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(646ba931cae95f128b165b43a93dde7f2972f1f86dc29c8b0d7d64b064e6586d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.277104 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(646ba931cae95f128b165b43a93dde7f2972f1f86dc29c8b0d7d64b064e6586d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" podUID="96234cfc-2b1f-4a48-8631-c22932a7129e" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.293486 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(933613b5fe1cb1c37c7b6d46f7ddba9629bc4c5d7ec44a8ac8c91e75da478b0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.293562 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(933613b5fe1cb1c37c7b6d46f7ddba9629bc4c5d7ec44a8ac8c91e75da478b0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.293589 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(933613b5fe1cb1c37c7b6d46f7ddba9629bc4c5d7ec44a8ac8c91e75da478b0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:00 crc kubenswrapper[4840]: E1209 17:09:00.293646 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(933613b5fe1cb1c37c7b6d46f7ddba9629bc4c5d7ec44a8ac8c91e75da478b0b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" podUID="f7a8b3dc-71ea-45c4-9699-4c7194a5d90f" Dec 09 17:09:01 crc kubenswrapper[4840]: I1209 17:09:01.608279 4840 scope.go:117] "RemoveContainer" containerID="f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315" Dec 09 17:09:01 crc kubenswrapper[4840]: E1209 17:09:01.608485 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-n2cr9_openshift-multus(9c465ec1-5011-46d7-bcf3-df79d8b4543b)\"" pod="openshift-multus/multus-n2cr9" podUID="9c465ec1-5011-46d7-bcf3-df79d8b4543b" Dec 09 17:09:04 crc kubenswrapper[4840]: I1209 17:09:04.036336 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:09:04 crc kubenswrapper[4840]: I1209 17:09:04.036775 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:09:11 crc kubenswrapper[4840]: I1209 17:09:11.608326 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:11 crc kubenswrapper[4840]: I1209 17:09:11.609572 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:11 crc kubenswrapper[4840]: E1209 17:09:11.646922 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(d55fc11b6849e9a458e2252ff585b1a49b549b18d4c2c389ecb74de92255d83b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:09:11 crc kubenswrapper[4840]: E1209 17:09:11.647292 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(d55fc11b6849e9a458e2252ff585b1a49b549b18d4c2c389ecb74de92255d83b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:11 crc kubenswrapper[4840]: E1209 17:09:11.647318 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(d55fc11b6849e9a458e2252ff585b1a49b549b18d4c2c389ecb74de92255d83b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:11 crc kubenswrapper[4840]: E1209 17:09:11.647377 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-webhook-5655c58dd6-n9tqx_cert-manager(f7a8b3dc-71ea-45c4-9699-4c7194a5d90f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-webhook-5655c58dd6-n9tqx_cert-manager_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f_0(d55fc11b6849e9a458e2252ff585b1a49b549b18d4c2c389ecb74de92255d83b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" podUID="f7a8b3dc-71ea-45c4-9699-4c7194a5d90f" Dec 09 17:09:14 crc kubenswrapper[4840]: I1209 17:09:14.608122 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:14 crc kubenswrapper[4840]: I1209 17:09:14.608122 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:14 crc kubenswrapper[4840]: I1209 17:09:14.613848 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:14 crc kubenswrapper[4840]: I1209 17:09:14.614090 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.662377 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(1d8c416b91b56d69a43d413d98cb6902984b5ff24a3a180d7a8e4e275774b4ad): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.662468 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(1d8c416b91b56d69a43d413d98cb6902984b5ff24a3a180d7a8e4e275774b4ad): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.662501 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(1d8c416b91b56d69a43d413d98cb6902984b5ff24a3a180d7a8e4e275774b4ad): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.662550 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-5b446d88c5-6wmh8_cert-manager(a56be419-62f5-4c5e-87dc-11c097f51918)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-5b446d88c5-6wmh8_cert-manager_a56be419-62f5-4c5e-87dc-11c097f51918_0(1d8c416b91b56d69a43d413d98cb6902984b5ff24a3a180d7a8e4e275774b4ad): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" podUID="a56be419-62f5-4c5e-87dc-11c097f51918" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.682003 4840 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(dbeb557a08369004d79ccd17751efc611feb95fe7a242d1f293392b7535ade2e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.682119 4840 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(dbeb557a08369004d79ccd17751efc611feb95fe7a242d1f293392b7535ade2e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.682169 4840 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(dbeb557a08369004d79ccd17751efc611feb95fe7a242d1f293392b7535ade2e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" Dec 09 17:09:14 crc kubenswrapper[4840]: E1209 17:09:14.682291 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"cert-manager-cainjector-7f985d654d-rvq4r_cert-manager(96234cfc-2b1f-4a48-8631-c22932a7129e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_cert-manager-cainjector-7f985d654d-rvq4r_cert-manager_96234cfc-2b1f-4a48-8631-c22932a7129e_0(dbeb557a08369004d79ccd17751efc611feb95fe7a242d1f293392b7535ade2e): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" podUID="96234cfc-2b1f-4a48-8631-c22932a7129e" Dec 09 17:09:16 crc kubenswrapper[4840]: I1209 17:09:16.608087 4840 scope.go:117] "RemoveContainer" containerID="f58e1517a2111fc4fc59d0def3cd15c5d0f34babfa2fd766c41b17ea6d14b315" Dec 09 17:09:18 crc kubenswrapper[4840]: I1209 17:09:18.321535 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n2cr9_9c465ec1-5011-46d7-bcf3-df79d8b4543b/kube-multus/2.log" Dec 09 17:09:18 crc kubenswrapper[4840]: I1209 17:09:18.321891 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n2cr9" event={"ID":"9c465ec1-5011-46d7-bcf3-df79d8b4543b","Type":"ContainerStarted","Data":"4b97d1aef7f2b29f01b4d880c80160ddedcee880591d0811c14f97021b45f4c2"} Dec 09 17:09:20 crc kubenswrapper[4840]: I1209 17:09:20.223585 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fkrtv" Dec 09 17:09:23 crc kubenswrapper[4840]: I1209 17:09:23.608459 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:23 crc kubenswrapper[4840]: I1209 17:09:23.609223 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:24 crc kubenswrapper[4840]: I1209 17:09:24.082377 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-n9tqx"] Dec 09 17:09:24 crc kubenswrapper[4840]: I1209 17:09:24.371005 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" event={"ID":"f7a8b3dc-71ea-45c4-9699-4c7194a5d90f","Type":"ContainerStarted","Data":"aa1ab3616b1052c5074056f5fa02c4e6e4a1ccfbf5625e9485e7e5bdde2aa08f"} Dec 09 17:09:25 crc kubenswrapper[4840]: I1209 17:09:25.473423 4840 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 09 17:09:27 crc kubenswrapper[4840]: I1209 17:09:27.410371 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" event={"ID":"f7a8b3dc-71ea-45c4-9699-4c7194a5d90f","Type":"ContainerStarted","Data":"1ae6565a1913b5dd292d46280b6b852c6298215cfb2b06d3db4f21f0cb61e886"} Dec 09 17:09:27 crc kubenswrapper[4840]: I1209 17:09:27.410695 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" Dec 09 17:09:27 crc kubenswrapper[4840]: I1209 17:09:27.432577 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx" podStartSLOduration=25.96701199 podStartE2EDuration="28.432555202s" podCreationTimestamp="2025-12-09 17:08:59 +0000 UTC" firstStartedPulling="2025-12-09 17:09:24.086345867 +0000 UTC m=+750.077456540" lastFinishedPulling="2025-12-09 17:09:26.551889119 +0000 UTC m=+752.542999752" observedRunningTime="2025-12-09 17:09:27.428519972 +0000 UTC m=+753.419630615" watchObservedRunningTime="2025-12-09 17:09:27.432555202 +0000 UTC m=+753.423665835" Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.607931 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8"
Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.608210 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r"
Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.608334 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6wmh8"
Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.609025 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r"
Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.863420 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-rvq4r"]
Dec 09 17:09:28 crc kubenswrapper[4840]: W1209 17:09:28.876242 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96234cfc_2b1f_4a48_8631_c22932a7129e.slice/crio-f7f655c9ea37884e4548c92d110d5cfbe2da2903301010e9cd0ad2fae1ac3fe7 WatchSource:0}: Error finding container f7f655c9ea37884e4548c92d110d5cfbe2da2903301010e9cd0ad2fae1ac3fe7: Status 404 returned error can't find the container with id f7f655c9ea37884e4548c92d110d5cfbe2da2903301010e9cd0ad2fae1ac3fe7
Dec 09 17:09:28 crc kubenswrapper[4840]: I1209 17:09:28.896851 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6wmh8"]
Dec 09 17:09:29 crc kubenswrapper[4840]: I1209 17:09:29.427447 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" event={"ID":"96234cfc-2b1f-4a48-8631-c22932a7129e","Type":"ContainerStarted","Data":"f7f655c9ea37884e4548c92d110d5cfbe2da2903301010e9cd0ad2fae1ac3fe7"}
Dec 09 17:09:29 crc kubenswrapper[4840]: I1209 17:09:29.431009 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" event={"ID":"a56be419-62f5-4c5e-87dc-11c097f51918","Type":"ContainerStarted","Data":"2be63566c9a5c98a678b0675b443e46eeca60aa8bdd642748fe3c134cf5c3d1f"}
Dec 09 17:09:31 crc kubenswrapper[4840]: I1209 17:09:31.446107 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" event={"ID":"96234cfc-2b1f-4a48-8631-c22932a7129e","Type":"ContainerStarted","Data":"7e7263f656bba6050d0a13c331303f64488c16a05c2e46fc5504fb526603bbaf"}
Dec 09 17:09:31 crc kubenswrapper[4840]: I1209 17:09:31.447797 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" event={"ID":"a56be419-62f5-4c5e-87dc-11c097f51918","Type":"ContainerStarted","Data":"3b2266b4c3a389cb342abc698c8a34970c1185ce6d14a189c1b69adcc39168ae"}
Dec 09 17:09:31 crc kubenswrapper[4840]: I1209 17:09:31.468474 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-rvq4r" podStartSLOduration=30.643846984 podStartE2EDuration="32.468455117s" podCreationTimestamp="2025-12-09 17:08:59 +0000 UTC" firstStartedPulling="2025-12-09 17:09:28.879471305 +0000 UTC m=+754.870581938" lastFinishedPulling="2025-12-09 17:09:30.704079398 +0000 UTC m=+756.695190071" observedRunningTime="2025-12-09 17:09:31.466718729 +0000 UTC m=+757.457829362" watchObservedRunningTime="2025-12-09 17:09:31.468455117 +0000 UTC m=+757.459565750"
Dec 09 17:09:31 crc kubenswrapper[4840]: I1209 17:09:31.484270 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-6wmh8" podStartSLOduration=30.630139079 podStartE2EDuration="32.484252201s" podCreationTimestamp="2025-12-09 17:08:59 +0000 UTC" firstStartedPulling="2025-12-09 17:09:28.908373769 +0000 UTC m=+754.899484402" lastFinishedPulling="2025-12-09 17:09:30.762486891 +0000 UTC m=+756.753597524" observedRunningTime="2025-12-09 17:09:31.481280939 +0000 UTC m=+757.472391572" watchObservedRunningTime="2025-12-09 17:09:31.484252201 +0000 UTC m=+757.475362834"
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.036375 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.036705 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.036767 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.037647 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.037743 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d" gracePeriod=600
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.425074 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-n9tqx"
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.474427 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d" exitCode=0
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.474492 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d"}
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.474527 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b"}
Dec 09 17:09:34 crc kubenswrapper[4840]: I1209 17:09:34.474554 4840 scope.go:117] "RemoveContainer" containerID="09f6a49f6439950d858bfbcc221bf4d2c913dd3fdfb026b55c0db62f0490a808"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.744280 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"]
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.746657 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.749169 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.767948 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"]
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.768447 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhzp\" (UniqueName: \"kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.768495 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.768557 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.869754 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhzp\" (UniqueName: \"kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.869794 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.869842 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.870307 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.870524 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:08 crc kubenswrapper[4840]: I1209 17:10:08.892009 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhzp\" (UniqueName: \"kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:09 crc kubenswrapper[4840]: I1209 17:10:09.061768 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:09 crc kubenswrapper[4840]: I1209 17:10:09.404572 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"]
Dec 09 17:10:09 crc kubenswrapper[4840]: I1209 17:10:09.746000 4840 generic.go:334] "Generic (PLEG): container finished" podID="9264f055-61da-418d-8dad-4b0c00694797" containerID="fdf89848a5a0d5dc908f2027fd75a5b306aa4e10c92c77f317d921d1df4565fd" exitCode=0
Dec 09 17:10:09 crc kubenswrapper[4840]: I1209 17:10:09.746042 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc" event={"ID":"9264f055-61da-418d-8dad-4b0c00694797","Type":"ContainerDied","Data":"fdf89848a5a0d5dc908f2027fd75a5b306aa4e10c92c77f317d921d1df4565fd"}
Dec 09 17:10:09 crc kubenswrapper[4840]: I1209 17:10:09.746069 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc" event={"ID":"9264f055-61da-418d-8dad-4b0c00694797","Type":"ContainerStarted","Data":"56630529b357bde480060e44f851d8838c7678c6df9e636f857df33b0d42867b"}
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.643843 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"]
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.645047 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.648070 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.649759 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.651679 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.652681 4840 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-kb7h6"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.793928 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-08937326-7487-43b4-ac91-4184e86bf6a0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08937326-7487-43b4-ac91-4184e86bf6a0\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.794226 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6rcm\" (UniqueName: \"kubernetes.io/projected/66601624-94ae-4407-b0c6-b5da03ab86b6-kube-api-access-s6rcm\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.895194 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6rcm\" (UniqueName: \"kubernetes.io/projected/66601624-94ae-4407-b0c6-b5da03ab86b6-kube-api-access-s6rcm\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.895315 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-08937326-7487-43b4-ac91-4184e86bf6a0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08937326-7487-43b4-ac91-4184e86bf6a0\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.899248 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.899301 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-08937326-7487-43b4-ac91-4184e86bf6a0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08937326-7487-43b4-ac91-4184e86bf6a0\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/18e7d6dd104e61d0dbd488dcbabc0b4b249ed517e63ad7b10d46179003b8c502/globalmount\"" pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.923034 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6rcm\" (UniqueName: \"kubernetes.io/projected/66601624-94ae-4407-b0c6-b5da03ab86b6-kube-api-access-s6rcm\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.939651 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-08937326-7487-43b4-ac91-4184e86bf6a0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08937326-7487-43b4-ac91-4184e86bf6a0\") pod \"minio\" (UID: \"66601624-94ae-4407-b0c6-b5da03ab86b6\") " pod="minio-dev/minio"
Dec 09 17:10:10 crc kubenswrapper[4840]: I1209 17:10:10.975346 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.078814 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.080155 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.086837 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.198616 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.198703 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.198759 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pq8s\" (UniqueName: \"kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.295911 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.300246 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.300313 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.300361 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pq8s\" (UniqueName: \"kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.300900 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.300909 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: W1209 17:10:11.303228 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66601624_94ae_4407_b0c6_b5da03ab86b6.slice/crio-6ccc3294484d9ce38f82100dd1262a75f551880ba349c9c938926a6b4e735d3c WatchSource:0}: Error finding container 6ccc3294484d9ce38f82100dd1262a75f551880ba349c9c938926a6b4e735d3c: Status 404 returned error can't find the container with id 6ccc3294484d9ce38f82100dd1262a75f551880ba349c9c938926a6b4e735d3c
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.320824 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pq8s\" (UniqueName: \"kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s\") pod \"redhat-operators-xfchg\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") " pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.427315 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.665340 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.758506 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerStarted","Data":"ab2f9cce06a5870c4ab0d66ff4452770b7822063ffef652b2011a444a5f55207"}
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.759697 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"66601624-94ae-4407-b0c6-b5da03ab86b6","Type":"ContainerStarted","Data":"6ccc3294484d9ce38f82100dd1262a75f551880ba349c9c938926a6b4e735d3c"}
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.761823 4840 generic.go:334] "Generic (PLEG): container finished" podID="9264f055-61da-418d-8dad-4b0c00694797" containerID="05d5b8ff1ea7050e7fe515948c3f3ea43460f9a12de610bb3c7f44acd268ee92" exitCode=0
Dec 09 17:10:11 crc kubenswrapper[4840]: I1209 17:10:11.761854 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc" event={"ID":"9264f055-61da-418d-8dad-4b0c00694797","Type":"ContainerDied","Data":"05d5b8ff1ea7050e7fe515948c3f3ea43460f9a12de610bb3c7f44acd268ee92"}
Dec 09 17:10:12 crc kubenswrapper[4840]: I1209 17:10:12.768325 4840 generic.go:334] "Generic (PLEG): container finished" podID="9264f055-61da-418d-8dad-4b0c00694797" containerID="20c9675be2661f90aeb4d2704c3f84997762eee1c6f89a62758ef4587fa1bb4e" exitCode=0
Dec 09 17:10:12 crc kubenswrapper[4840]: I1209 17:10:12.768649 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc" event={"ID":"9264f055-61da-418d-8dad-4b0c00694797","Type":"ContainerDied","Data":"20c9675be2661f90aeb4d2704c3f84997762eee1c6f89a62758ef4587fa1bb4e"}
Dec 09 17:10:12 crc kubenswrapper[4840]: I1209 17:10:12.773874 4840 generic.go:334] "Generic (PLEG): container finished" podID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerID="139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df" exitCode=0
Dec 09 17:10:12 crc kubenswrapper[4840]: I1209 17:10:12.773921 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerDied","Data":"139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df"}
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.293942 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.438477 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util\") pod \"9264f055-61da-418d-8dad-4b0c00694797\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") "
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.438567 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rhzp\" (UniqueName: \"kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp\") pod \"9264f055-61da-418d-8dad-4b0c00694797\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") "
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.438699 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle\") pod \"9264f055-61da-418d-8dad-4b0c00694797\" (UID: \"9264f055-61da-418d-8dad-4b0c00694797\") "
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.452159 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle" (OuterVolumeSpecName: "bundle") pod "9264f055-61da-418d-8dad-4b0c00694797" (UID: "9264f055-61da-418d-8dad-4b0c00694797"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.454994 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util" (OuterVolumeSpecName: "util") pod "9264f055-61da-418d-8dad-4b0c00694797" (UID: "9264f055-61da-418d-8dad-4b0c00694797"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.484155 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp" (OuterVolumeSpecName: "kube-api-access-9rhzp") pod "9264f055-61da-418d-8dad-4b0c00694797" (UID: "9264f055-61da-418d-8dad-4b0c00694797"). InnerVolumeSpecName "kube-api-access-9rhzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.539857 4840 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-util\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.539894 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rhzp\" (UniqueName: \"kubernetes.io/projected/9264f055-61da-418d-8dad-4b0c00694797-kube-api-access-9rhzp\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.539907 4840 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9264f055-61da-418d-8dad-4b0c00694797-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.788275 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc" event={"ID":"9264f055-61da-418d-8dad-4b0c00694797","Type":"ContainerDied","Data":"56630529b357bde480060e44f851d8838c7678c6df9e636f857df33b0d42867b"}
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.788589 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56630529b357bde480060e44f851d8838c7678c6df9e636f857df33b0d42867b"
Dec 09 17:10:14 crc kubenswrapper[4840]: I1209 17:10:14.788598 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc"
Dec 09 17:10:15 crc kubenswrapper[4840]: I1209 17:10:15.799766 4840 generic.go:334] "Generic (PLEG): container finished" podID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerID="f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6" exitCode=0
Dec 09 17:10:15 crc kubenswrapper[4840]: I1209 17:10:15.799879 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerDied","Data":"f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6"}
Dec 09 17:10:15 crc kubenswrapper[4840]: I1209 17:10:15.803595 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"66601624-94ae-4407-b0c6-b5da03ab86b6","Type":"ContainerStarted","Data":"8058182662d8211a458e41adaedefa1c1c4a6203af0f1a42e7c87aa9e5674cb3"}
Dec 09 17:10:15 crc kubenswrapper[4840]: I1209 17:10:15.866257 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.485779447 podStartE2EDuration="7.866231232s" podCreationTimestamp="2025-12-09 17:10:08 +0000 UTC" firstStartedPulling="2025-12-09 17:10:11.305358274 +0000 UTC m=+797.296468907" lastFinishedPulling="2025-12-09 17:10:14.685810059 +0000 UTC m=+800.676920692" observedRunningTime="2025-12-09 17:10:15.863514848 +0000 UTC m=+801.854625551" watchObservedRunningTime="2025-12-09 17:10:15.866231232 +0000 UTC m=+801.857341905"
Dec 09 17:10:16 crc kubenswrapper[4840]: I1209 17:10:16.811699 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerStarted","Data":"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"}
Dec 09 17:10:16 crc kubenswrapper[4840]: I1209 17:10:16.834761 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xfchg" podStartSLOduration=2.5057828559999997 podStartE2EDuration="5.834742738s" podCreationTimestamp="2025-12-09 17:10:11 +0000 UTC" firstStartedPulling="2025-12-09 17:10:12.912808733 +0000 UTC m=+798.903919366" lastFinishedPulling="2025-12-09 17:10:16.241768605 +0000 UTC m=+802.232879248" observedRunningTime="2025-12-09 17:10:16.832150466 +0000 UTC m=+802.823261099" watchObservedRunningTime="2025-12-09 17:10:16.834742738 +0000 UTC m=+802.825853371"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.246358 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"]
Dec 09 17:10:20 crc kubenswrapper[4840]: E1209 17:10:20.247100 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="pull"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.247115 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="pull"
Dec 09 17:10:20 crc kubenswrapper[4840]: E1209 17:10:20.247125 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="util"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.247132 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="util"
Dec 09 17:10:20 crc kubenswrapper[4840]: E1209 17:10:20.247142 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="extract"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.247147 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="extract"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.247245 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9264f055-61da-418d-8dad-4b0c00694797" containerName="extract"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.247868 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.252222 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.252428 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.253726 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.253827 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-thh9x"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.253827 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.260596 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.272868 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"]
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.322356 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.322429 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-apiservice-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.322479 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-webhook-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.322617 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dgzv\" (UniqueName: \"kubernetes.io/projected/e0cfa500-d9f5-41c2-a215-661d8d8976cf-kube-api-access-7dgzv\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.322728 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e0cfa500-d9f5-41c2-a215-661d8d8976cf-manager-config\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.424298 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e0cfa500-d9f5-41c2-a215-661d8d8976cf-manager-config\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.424372 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.424407 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-apiservice-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.424441 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-webhook-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.424473 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dgzv\" (UniqueName: \"kubernetes.io/projected/e0cfa500-d9f5-41c2-a215-661d8d8976cf-kube-api-access-7dgzv\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.425225 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e0cfa500-d9f5-41c2-a215-661d8d8976cf-manager-config\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.430805 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-webhook-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.431157 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-apiservice-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.431840 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e0cfa500-d9f5-41c2-a215-661d8d8976cf-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.446821 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dgzv\" (UniqueName: \"kubernetes.io/projected/e0cfa500-d9f5-41c2-a215-661d8d8976cf-kube-api-access-7dgzv\") pod \"loki-operator-controller-manager-648c77c5bd-sjprw\" (UID: \"e0cfa500-d9f5-41c2-a215-661d8d8976cf\") " pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.561770 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:20 crc kubenswrapper[4840]: I1209 17:10:20.973135 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"]
Dec 09 17:10:21 crc kubenswrapper[4840]: I1209 17:10:21.427506 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:21 crc kubenswrapper[4840]: I1209 17:10:21.427568 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:21 crc kubenswrapper[4840]: I1209 17:10:21.838334 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw" event={"ID":"e0cfa500-d9f5-41c2-a215-661d8d8976cf","Type":"ContainerStarted","Data":"272f2ed0c7304291aed063498b8cbc88327782f7fb9fae52b7069db20a25e14f"}
Dec 09 17:10:22 crc kubenswrapper[4840]: I1209 17:10:22.477852 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xfchg" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="registry-server" probeResult="failure" output=<
Dec 09 17:10:22 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 17:10:22 crc kubenswrapper[4840]: >
Dec 09 17:10:28 crc kubenswrapper[4840]: I1209 17:10:28.885614 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw" event={"ID":"e0cfa500-d9f5-41c2-a215-661d8d8976cf","Type":"ContainerStarted","Data":"4101e88ef9e8e1ae8109eedce4697036a9ed181c28942bdd8076055872db6e85"}
Dec 09 17:10:31 crc kubenswrapper[4840]: I1209 17:10:31.475294 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:31 crc kubenswrapper[4840]: I1209 17:10:31.548141 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:33 crc kubenswrapper[4840]: I1209 17:10:33.868565 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:33 crc kubenswrapper[4840]: I1209 17:10:33.868793 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xfchg" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="registry-server" containerID="cri-o://0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b" gracePeriod=2
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.793196 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.871399 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content\") pod \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") "
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.871491 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities\") pod \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") "
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.871564 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pq8s\" (UniqueName: \"kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s\") pod \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\" (UID: \"3e4d3b8c-66e8-49cc-bb50-a1ba74814577\") "
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.872454 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities" (OuterVolumeSpecName: "utilities") pod "3e4d3b8c-66e8-49cc-bb50-a1ba74814577" (UID: "3e4d3b8c-66e8-49cc-bb50-a1ba74814577"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.879381 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s" (OuterVolumeSpecName: "kube-api-access-9pq8s") pod "3e4d3b8c-66e8-49cc-bb50-a1ba74814577" (UID: "3e4d3b8c-66e8-49cc-bb50-a1ba74814577"). InnerVolumeSpecName "kube-api-access-9pq8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.941423 4840 generic.go:334] "Generic (PLEG): container finished" podID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerID="0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b" exitCode=0
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.941482 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerDied","Data":"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"}
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.941756 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfchg" event={"ID":"3e4d3b8c-66e8-49cc-bb50-a1ba74814577","Type":"ContainerDied","Data":"ab2f9cce06a5870c4ab0d66ff4452770b7822063ffef652b2011a444a5f55207"}
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.941779 4840 scope.go:117] "RemoveContainer" containerID="0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.941522 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfchg"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.957266 4840 scope.go:117] "RemoveContainer" containerID="f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.972556 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.972580 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pq8s\" (UniqueName: \"kubernetes.io/projected/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-kube-api-access-9pq8s\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.978326 4840 scope.go:117] "RemoveContainer" containerID="139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989013 4840 scope.go:117] "RemoveContainer" containerID="0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"
Dec 09 17:10:34 crc kubenswrapper[4840]: E1209 17:10:34.989355 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b\": container with ID starting with 0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b not found: ID does not exist" containerID="0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989394 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b"} err="failed to get container status \"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b\": rpc error: code = NotFound desc = could not find container \"0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b\": container with ID starting with 0cb11afdc54d15cab94befcc95ddfbcdc59183f27478d92b102bdabc52f4c82b not found: ID does not exist"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989418 4840 scope.go:117] "RemoveContainer" containerID="f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6"
Dec 09 17:10:34 crc kubenswrapper[4840]: E1209 17:10:34.989660 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6\": container with ID starting with f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6 not found: ID does not exist" containerID="f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989690 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6"} err="failed to get container status \"f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6\": rpc error: code = NotFound desc = could not find container \"f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6\": container with ID starting with f1caf2289bc33095de521768b4c3bdbe6ed0598e843c432e33b8e5310ec1c3f6 not found: ID does not exist"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989712 4840 scope.go:117] "RemoveContainer" containerID="139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df"
Dec 09 17:10:34 crc kubenswrapper[4840]: E1209 17:10:34.989912 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df\": container with ID starting with 139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df not found: ID does not exist" containerID="139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df"
Dec 09 17:10:34 crc kubenswrapper[4840]: I1209 17:10:34.989933 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df"} err="failed to get container status \"139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df\": rpc error: code = NotFound desc = could not find container \"139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df\": container with ID starting with 139ab9d0d2edb36ce0a6570f38eec7101b0fb7c32cfd8112c0b8f4a498aa66df not found: ID does not exist"
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.008984 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e4d3b8c-66e8-49cc-bb50-a1ba74814577" (UID: "3e4d3b8c-66e8-49cc-bb50-a1ba74814577"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.075109 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e4d3b8c-66e8-49cc-bb50-a1ba74814577-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.287898 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.293117 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xfchg"]
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.950450 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw" event={"ID":"e0cfa500-d9f5-41c2-a215-661d8d8976cf","Type":"ContainerStarted","Data":"4dcc8996741e4dead8df753c26d75f0be694049e32769bf8d6bda8b4a0d37f28"}
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.950739 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.953341 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw"
Dec 09 17:10:35 crc kubenswrapper[4840]: I1209 17:10:35.984411 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-648c77c5bd-sjprw" podStartSLOduration=2.161683547 podStartE2EDuration="15.984388028s" podCreationTimestamp="2025-12-09 17:10:20 +0000 UTC" firstStartedPulling="2025-12-09 17:10:20.989363862 +0000 UTC m=+806.980474515" lastFinishedPulling="2025-12-09 17:10:34.812068363 +0000 UTC m=+820.803178996" observedRunningTime="2025-12-09 17:10:35.976318782 +0000 UTC m=+821.967429415" watchObservedRunningTime="2025-12-09 17:10:35.984388028 +0000 UTC m=+821.975498681"
Dec 09 17:10:36 crc kubenswrapper[4840]: I1209 17:10:36.617063 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" path="/var/lib/kubelet/pods/3e4d3b8c-66e8-49cc-bb50-a1ba74814577/volumes"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.302692 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"]
Dec 09 17:11:08 crc kubenswrapper[4840]: E1209 17:11:08.303628 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="extract-content"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.303648 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="extract-content"
Dec 09 17:11:08 crc kubenswrapper[4840]: E1209 17:11:08.303667 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="extract-utilities"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.303678 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="extract-utilities"
Dec 09 17:11:08 crc kubenswrapper[4840]: E1209 17:11:08.303705 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="registry-server"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.303719 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="registry-server"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.303909 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e4d3b8c-66e8-49cc-bb50-a1ba74814577" containerName="registry-server"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.305293 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.307734 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.318216 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"]
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.392473 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.392523 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.392591 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdfcb\" (UniqueName: \"kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.493882 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdfcb\" (UniqueName: \"kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.494008 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.494040 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.494734 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.494891 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.518104 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdfcb\" (UniqueName: \"kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:08 crc kubenswrapper[4840]: I1209 17:11:08.684354 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:09 crc kubenswrapper[4840]: I1209 17:11:09.175630 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"]
Dec 09 17:11:10 crc kubenswrapper[4840]: I1209 17:11:10.185558 4840 generic.go:334] "Generic (PLEG): container finished" podID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerID="f8f0864d2a451437e3b55f57e6b6d302647ae6218eb5e346611652a7547d7115" exitCode=0
Dec 09 17:11:10 crc kubenswrapper[4840]: I1209 17:11:10.185657 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj" event={"ID":"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb","Type":"ContainerDied","Data":"f8f0864d2a451437e3b55f57e6b6d302647ae6218eb5e346611652a7547d7115"}
Dec 09 17:11:10 crc kubenswrapper[4840]: I1209 17:11:10.186023 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj" event={"ID":"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb","Type":"ContainerStarted","Data":"2866f9c79ee80050040c6c835fcb42a5a04c59ed855407e415280e48bf56a326"}
Dec 09 17:11:12 crc kubenswrapper[4840]: I1209 17:11:12.204012 4840 generic.go:334] "Generic (PLEG): container finished" podID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerID="328baaa23ba19c46787a70b10f95a10b3a3e3918bc7f80c61b2b45f3897cac2e" exitCode=0
Dec 09 17:11:12 crc kubenswrapper[4840]: I1209 17:11:12.204203 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj" event={"ID":"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb","Type":"ContainerDied","Data":"328baaa23ba19c46787a70b10f95a10b3a3e3918bc7f80c61b2b45f3897cac2e"}
Dec 09 17:11:13 crc kubenswrapper[4840]: I1209 17:11:13.212777 4840 generic.go:334] "Generic (PLEG): container finished" podID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerID="cf2da397d42c79bc7a21157fc3b737b156d08733719ecd7913f1ee05079e462c" exitCode=0
Dec 09 17:11:13 crc kubenswrapper[4840]: I1209 17:11:13.212838 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj" event={"ID":"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb","Type":"ContainerDied","Data":"cf2da397d42c79bc7a21157fc3b737b156d08733719ecd7913f1ee05079e462c"}
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.534933 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.677476 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle\") pod \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") "
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.677740 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdfcb\" (UniqueName: \"kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb\") pod \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") "
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.677850 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util\") pod \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\" (UID: \"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb\") "
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.678902 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle" (OuterVolumeSpecName: "bundle") pod "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" (UID: "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.685899 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb" (OuterVolumeSpecName: "kube-api-access-sdfcb") pod "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" (UID: "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb"). InnerVolumeSpecName "kube-api-access-sdfcb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.696944 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util" (OuterVolumeSpecName: "util") pod "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" (UID: "0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.779671 4840 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-util\") on node \"crc\" DevicePath \"\""
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.779731 4840 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:11:14 crc kubenswrapper[4840]: I1209 17:11:14.779752 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdfcb\" (UniqueName: \"kubernetes.io/projected/0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb-kube-api-access-sdfcb\") on node \"crc\" DevicePath \"\""
Dec 09 17:11:15 crc kubenswrapper[4840]: I1209 17:11:15.251092 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj" event={"ID":"0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb","Type":"ContainerDied","Data":"2866f9c79ee80050040c6c835fcb42a5a04c59ed855407e415280e48bf56a326"}
Dec 09 17:11:15 crc kubenswrapper[4840]: I1209 17:11:15.251148 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2866f9c79ee80050040c6c835fcb42a5a04c59ed855407e415280e48bf56a326"
Dec 09 17:11:15 crc kubenswrapper[4840]: I1209 17:11:15.251563 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.228327 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp"]
Dec 09 17:11:20 crc kubenswrapper[4840]: E1209 17:11:20.228759 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="extract"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.228775 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="extract"
Dec 09 17:11:20 crc kubenswrapper[4840]: E1209 17:11:20.228790 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="util"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.228799 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="util"
Dec 09 17:11:20 crc kubenswrapper[4840]: E1209 17:11:20.228812 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="pull"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.228821 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="pull"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.228935 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb" containerName="extract"
Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.229499 4840 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.232234 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.232361 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-dvlxm" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.232534 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.240636 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp"] Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.351069 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlvc9\" (UniqueName: \"kubernetes.io/projected/86e73f61-98bb-4332-9494-13c663fd8de7-kube-api-access-nlvc9\") pod \"nmstate-operator-5b5b58f5c8-k2xmp\" (UID: \"86e73f61-98bb-4332-9494-13c663fd8de7\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.452482 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlvc9\" (UniqueName: \"kubernetes.io/projected/86e73f61-98bb-4332-9494-13c663fd8de7-kube-api-access-nlvc9\") pod \"nmstate-operator-5b5b58f5c8-k2xmp\" (UID: \"86e73f61-98bb-4332-9494-13c663fd8de7\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.475632 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlvc9\" (UniqueName: \"kubernetes.io/projected/86e73f61-98bb-4332-9494-13c663fd8de7-kube-api-access-nlvc9\") pod \"nmstate-operator-5b5b58f5c8-k2xmp\" (UID: \"86e73f61-98bb-4332-9494-13c663fd8de7\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.549919 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" Dec 09 17:11:20 crc kubenswrapper[4840]: I1209 17:11:20.815175 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp"] Dec 09 17:11:20 crc kubenswrapper[4840]: W1209 17:11:20.825337 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86e73f61_98bb_4332_9494_13c663fd8de7.slice/crio-2e681b0b1b1566769a7e28e8935616499998d54e8842fd877763d5d8d7f9a6e5 WatchSource:0}: Error finding container 2e681b0b1b1566769a7e28e8935616499998d54e8842fd877763d5d8d7f9a6e5: Status 404 returned error can't find the container with id 2e681b0b1b1566769a7e28e8935616499998d54e8842fd877763d5d8d7f9a6e5 Dec 09 17:11:21 crc kubenswrapper[4840]: I1209 17:11:21.290108 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" event={"ID":"86e73f61-98bb-4332-9494-13c663fd8de7","Type":"ContainerStarted","Data":"2e681b0b1b1566769a7e28e8935616499998d54e8842fd877763d5d8d7f9a6e5"} Dec 09 17:11:23 crc kubenswrapper[4840]: I1209 17:11:23.303491 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" event={"ID":"86e73f61-98bb-4332-9494-13c663fd8de7","Type":"ContainerStarted","Data":"d43450ba32e74d1bf4bb7925bbcba0e3d261ddedb6f48ba0dee19a676e0a2226"} Dec 09 17:11:23 crc kubenswrapper[4840]: I1209 17:11:23.329660 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k2xmp" podStartSLOduration=1.518787002 podStartE2EDuration="3.329634906s" podCreationTimestamp="2025-12-09 17:11:20 +0000 UTC" firstStartedPulling="2025-12-09 17:11:20.827511812 +0000 UTC m=+866.818622445" lastFinishedPulling="2025-12-09 17:11:22.638359716 +0000 UTC m=+868.629470349" observedRunningTime="2025-12-09 17:11:23.323124267 +0000 UTC m=+869.314234910" watchObservedRunningTime="2025-12-09 17:11:23.329634906 +0000 UTC m=+869.320745549" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.396793 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.398132 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.401208 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-gqrt9" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.407046 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.411096 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.411726 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.416383 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.430227 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-7s69c"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.431196 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.438934 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.466028 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s72lb\" (UniqueName: \"kubernetes.io/projected/ca745f75-67ee-4db8-b8e9-49a7ab4cf95e-kube-api-access-s72lb\") pod \"nmstate-metrics-7f946cbc9-rtwkf\" (UID: \"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.466168 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.466243 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkpzb\" (UniqueName: \"kubernetes.io/projected/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-kube-api-access-kkpzb\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.547736 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.548642 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: W1209 17:11:29.550187 4840 reflector.go:561] object-"openshift-nmstate"/"nginx-conf": failed to list *v1.ConfigMap: configmaps "nginx-conf" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-nmstate": no relationship found between node 'crc' and this object Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.550291 4840 reflector.go:158] "Unhandled Error" err="object-\"openshift-nmstate\"/\"nginx-conf\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"nginx-conf\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-nmstate\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 09 17:11:29 crc kubenswrapper[4840]: W1209 17:11:29.550203 4840 reflector.go:561] object-"openshift-nmstate"/"default-dockercfg-rnfc5": failed to list *v1.Secret: secrets "default-dockercfg-rnfc5" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-nmstate": no relationship found between node 'crc' and this object Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.550355 4840 reflector.go:158] "Unhandled Error" err="object-\"openshift-nmstate\"/\"default-dockercfg-rnfc5\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-rnfc5\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-nmstate\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.550463 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.566973 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7g2p\" (UniqueName: \"kubernetes.io/projected/e80364ae-306f-494d-aaa8-da74475771d0-kube-api-access-r7g2p\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567039 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkpzb\" (UniqueName: \"kubernetes.io/projected/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-kube-api-access-kkpzb\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567071 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-nmstate-lock\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567103 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s72lb\" (UniqueName: \"kubernetes.io/projected/ca745f75-67ee-4db8-b8e9-49a7ab4cf95e-kube-api-access-s72lb\") pod \"nmstate-metrics-7f946cbc9-rtwkf\" (UID: \"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e\") " 
pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567121 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-ovs-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567145 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-dbus-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.567162 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.567276 4840 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.567339 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair podName:0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b nodeName:}" failed. No retries permitted until 2025-12-09 17:11:30.067306202 +0000 UTC m=+876.058416835 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-wbtkf" (UID: "0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b") : secret "openshift-nmstate-webhook" not found Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.575027 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.591270 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s72lb\" (UniqueName: \"kubernetes.io/projected/ca745f75-67ee-4db8-b8e9-49a7ab4cf95e-kube-api-access-s72lb\") pod \"nmstate-metrics-7f946cbc9-rtwkf\" (UID: \"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.592555 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkpzb\" (UniqueName: \"kubernetes.io/projected/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-kube-api-access-kkpzb\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668247 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-dbus-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668360 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668399 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7g2p\" (UniqueName: \"kubernetes.io/projected/e80364ae-306f-494d-aaa8-da74475771d0-kube-api-access-r7g2p\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668454 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-nmstate-lock\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668493 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668516 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrk25\" (UniqueName: 
\"kubernetes.io/projected/8a0577b2-63ef-4d11-8f94-e847b9c5a520-kube-api-access-vrk25\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668539 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-ovs-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668598 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-ovs-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668654 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-nmstate-lock\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.668671 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e80364ae-306f-494d-aaa8-da74475771d0-dbus-socket\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.686821 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7g2p\" (UniqueName: \"kubernetes.io/projected/e80364ae-306f-494d-aaa8-da74475771d0-kube-api-access-r7g2p\") pod \"nmstate-handler-7s69c\" (UID: \"e80364ae-306f-494d-aaa8-da74475771d0\") " pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.728791 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7c98c5c9b5-mz7mp"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.729693 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.743366 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7c98c5c9b5-mz7mp"] Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.757380 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.771751 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrk25\" (UniqueName: \"kubernetes.io/projected/8a0577b2-63ef-4d11-8f94-e847b9c5a520-kube-api-access-vrk25\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.771819 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-oauth-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.771895 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.771935 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtn75\" (UniqueName: \"kubernetes.io/projected/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-kube-api-access-dtn75\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772072 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-trusted-ca-bundle\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772116 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-service-ca\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772148 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772179 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-oauth-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772237 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.772263 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.772369 4840 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Dec 09 17:11:29 crc kubenswrapper[4840]: E1209 17:11:29.772431 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert podName:8a0577b2-63ef-4d11-8f94-e847b9c5a520 nodeName:}" failed. No retries permitted until 2025-12-09 17:11:30.272413065 +0000 UTC m=+876.263523718 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-wblsr" (UID: "8a0577b2-63ef-4d11-8f94-e847b9c5a520") : secret "plugin-serving-cert" not found Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.789641 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.796686 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrk25\" (UniqueName: \"kubernetes.io/projected/8a0577b2-63ef-4d11-8f94-e847b9c5a520-kube-api-access-vrk25\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.876802 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-oauth-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.876944 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtn75\" (UniqueName: \"kubernetes.io/projected/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-kube-api-access-dtn75\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.877000 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-trusted-ca-bundle\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.877034 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-service-ca\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.877085 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.877116 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-oauth-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.877171 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.878415 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.878858 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-oauth-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.878985 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-service-ca\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.879258 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-trusted-ca-bundle\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.888818 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-oauth-config\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.895396 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-console-serving-cert\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:29 crc kubenswrapper[4840]: I1209 17:11:29.903711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtn75\" (UniqueName: \"kubernetes.io/projected/3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f-kube-api-access-dtn75\") pod \"console-7c98c5c9b5-mz7mp\" (UID: \"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f\") " pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.045115 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.082697 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.085610 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-wbtkf\" (UID: \"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.253193 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7c98c5c9b5-mz7mp"] Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.257258 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf"] Dec 09 17:11:30 crc kubenswrapper[4840]: W1209 17:11:30.259079 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ffc0d3a_acfd_4f4c_bb8d_887cbc755c7f.slice/crio-485f1b3fcceb43e80e17c4be600658eb5994cf7abc01820e9a734cf9897dd762 WatchSource:0}: Error finding container 485f1b3fcceb43e80e17c4be600658eb5994cf7abc01820e9a734cf9897dd762: Status 404 returned error can't find the container with id 485f1b3fcceb43e80e17c4be600658eb5994cf7abc01820e9a734cf9897dd762 Dec 09 17:11:30 crc kubenswrapper[4840]: W1209 17:11:30.260208 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca745f75_67ee_4db8_b8e9_49a7ab4cf95e.slice/crio-5be116315fdc1f03dc9c9908ddde7c2ec8490190c6f8ca472daa842e9c4128ea WatchSource:0}: Error finding container 5be116315fdc1f03dc9c9908ddde7c2ec8490190c6f8ca472daa842e9c4128ea: Status 404 returned error can't find the container with id 5be116315fdc1f03dc9c9908ddde7c2ec8490190c6f8ca472daa842e9c4128ea Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.284560 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.291143 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a0577b2-63ef-4d11-8f94-e847b9c5a520-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.360194 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" event={"ID":"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e","Type":"ContainerStarted","Data":"5be116315fdc1f03dc9c9908ddde7c2ec8490190c6f8ca472daa842e9c4128ea"} Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.361414 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7c98c5c9b5-mz7mp" event={"ID":"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f","Type":"ContainerStarted","Data":"485f1b3fcceb43e80e17c4be600658eb5994cf7abc01820e9a734cf9897dd762"} Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.362325 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-7s69c" event={"ID":"e80364ae-306f-494d-aaa8-da74475771d0","Type":"ContainerStarted","Data":"4680bf59b499a09d4e7c4b31bd9f6288b9d3a0d2d20f7a056ce3a4947eb05fff"} Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.374863 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.623190 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf"] Dec 09 17:11:30 crc kubenswrapper[4840]: W1209 17:11:30.626238 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c8f9850_74a1_43bb_b4f1_07e8e7ea0c7b.slice/crio-4558dbec7f1f01a16cb1ad3b8d8744f5e37bff9f03f3f3bf2fb58f63a1da9db6 WatchSource:0}: Error finding container 4558dbec7f1f01a16cb1ad3b8d8744f5e37bff9f03f3f3bf2fb58f63a1da9db6: Status 404 returned error can't find the container with id 4558dbec7f1f01a16cb1ad3b8d8744f5e37bff9f03f3f3bf2fb58f63a1da9db6 Dec 09 17:11:30 crc kubenswrapper[4840]: E1209 17:11:30.773071 4840 configmap.go:193] Couldn't get configMap openshift-nmstate/nginx-conf: failed to sync configmap cache: timed out waiting for the condition Dec 09 17:11:30 crc kubenswrapper[4840]: E1209 17:11:30.773216 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf podName:8a0577b2-63ef-4d11-8f94-e847b9c5a520 nodeName:}" failed. No retries permitted until 2025-12-09 17:11:31.273183618 +0000 UTC m=+877.264294291 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf") pod "nmstate-console-plugin-7fbb5f6569-wblsr" (UID: "8a0577b2-63ef-4d11-8f94-e847b9c5a520") : failed to sync configmap cache: timed out waiting for the condition Dec 09 17:11:30 crc kubenswrapper[4840]: I1209 17:11:30.893277 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-rnfc5" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.025227 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.310509 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.311484 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a0577b2-63ef-4d11-8f94-e847b9c5a520-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-wblsr\" (UID: \"8a0577b2-63ef-4d11-8f94-e847b9c5a520\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.361073 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.379139 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7c98c5c9b5-mz7mp" event={"ID":"3ffc0d3a-acfd-4f4c-bb8d-887cbc755c7f","Type":"ContainerStarted","Data":"17831d3310a8aecbefb735b188e4c596a024e11d27a51a596a47a25a9d8181b0"} Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.381046 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" event={"ID":"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b","Type":"ContainerStarted","Data":"4558dbec7f1f01a16cb1ad3b8d8744f5e37bff9f03f3f3bf2fb58f63a1da9db6"} Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.401678 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7c98c5c9b5-mz7mp" podStartSLOduration=2.40166005 podStartE2EDuration="2.40166005s" podCreationTimestamp="2025-12-09 17:11:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:11:31.395700866 +0000 UTC m=+877.386811499" watchObservedRunningTime="2025-12-09 17:11:31.40166005 +0000 UTC m=+877.392770683" Dec 09 17:11:31 crc kubenswrapper[4840]: I1209 17:11:31.583252 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr"] Dec 09 17:11:32 crc kubenswrapper[4840]: I1209 17:11:32.387507 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" event={"ID":"8a0577b2-63ef-4d11-8f94-e847b9c5a520","Type":"ContainerStarted","Data":"259e8f27c95146c7f66cea71313c3a67ff277b010a9d4674f30d25cc7e508cea"} Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.395616 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" event={"ID":"0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b","Type":"ContainerStarted","Data":"85c2fde090374bc19d7b1e4b4b05a8b83b1042fccb415b5c42a59ac0098cd89c"} Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.395902 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.398163 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-7s69c" event={"ID":"e80364ae-306f-494d-aaa8-da74475771d0","Type":"ContainerStarted","Data":"f78a028d2dbc249131804e989de9adc2941303b727dcf1796b8f13b344471184"} Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.398474 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.399656 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" event={"ID":"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e","Type":"ContainerStarted","Data":"4cc0cb595631b76122721148cff7d532fd9149a3ebc2853ea8e2919f92ebf268"} Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.414325 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" podStartSLOduration=1.995560944 podStartE2EDuration="4.414310911s" podCreationTimestamp="2025-12-09 17:11:29 +0000 UTC" firstStartedPulling="2025-12-09 17:11:30.628311089 +0000 UTC m=+876.619421722" lastFinishedPulling="2025-12-09 17:11:33.047061026 +0000 UTC m=+879.038171689" observedRunningTime="2025-12-09 17:11:33.412151358 +0000 UTC m=+879.403262001" watchObservedRunningTime="2025-12-09 17:11:33.414310911 +0000 UTC m=+879.405421544" Dec 09 17:11:33 crc kubenswrapper[4840]: I1209 17:11:33.442121 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-7s69c" podStartSLOduration=1.233974786 podStartE2EDuration="4.442103051s" podCreationTimestamp="2025-12-09 17:11:29 +0000 UTC" firstStartedPulling="2025-12-09 17:11:29.822647807 +0000 UTC m=+875.813758440" lastFinishedPulling="2025-12-09 17:11:33.030776072 +0000 UTC m=+879.021886705" observedRunningTime="2025-12-09 17:11:33.437573339 +0000 UTC m=+879.428684022" watchObservedRunningTime="2025-12-09 17:11:33.442103051 +0000 UTC m=+879.433213694" Dec 09 17:11:34 crc kubenswrapper[4840]: I1209 17:11:34.036776 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:11:34 crc kubenswrapper[4840]: I1209 17:11:34.036851 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:11:35 crc kubenswrapper[4840]: I1209 17:11:35.413185 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" 
event={"ID":"8a0577b2-63ef-4d11-8f94-e847b9c5a520","Type":"ContainerStarted","Data":"039517261bf4ccccc01093845a900f2c27f2d696d91475275d532c84d333c358"} Dec 09 17:11:35 crc kubenswrapper[4840]: I1209 17:11:35.458946 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-wblsr" podStartSLOduration=3.666691999 podStartE2EDuration="6.458927192s" podCreationTimestamp="2025-12-09 17:11:29 +0000 UTC" firstStartedPulling="2025-12-09 17:11:31.604043244 +0000 UTC m=+877.595153877" lastFinishedPulling="2025-12-09 17:11:34.396278437 +0000 UTC m=+880.387389070" observedRunningTime="2025-12-09 17:11:35.456561143 +0000 UTC m=+881.447671776" watchObservedRunningTime="2025-12-09 17:11:35.458927192 +0000 UTC m=+881.450037825" Dec 09 17:11:36 crc kubenswrapper[4840]: I1209 17:11:36.421443 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" event={"ID":"ca745f75-67ee-4db8-b8e9-49a7ab4cf95e","Type":"ContainerStarted","Data":"044b1502e6f3263c8cd54dd2bc5d55458ae0098366d87a972484e67eb9749544"} Dec 09 17:11:36 crc kubenswrapper[4840]: I1209 17:11:36.445828 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-rtwkf" podStartSLOduration=2.304095178 podStartE2EDuration="7.44581162s" podCreationTimestamp="2025-12-09 17:11:29 +0000 UTC" firstStartedPulling="2025-12-09 17:11:30.26262171 +0000 UTC m=+876.253732343" lastFinishedPulling="2025-12-09 17:11:35.404338152 +0000 UTC m=+881.395448785" observedRunningTime="2025-12-09 17:11:36.441809924 +0000 UTC m=+882.432920587" watchObservedRunningTime="2025-12-09 17:11:36.44581162 +0000 UTC m=+882.436922243" Dec 09 17:11:39 crc kubenswrapper[4840]: I1209 17:11:39.834000 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-7s69c" Dec 09 17:11:40 crc kubenswrapper[4840]: I1209 17:11:40.046259 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:40 crc kubenswrapper[4840]: I1209 17:11:40.046633 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:40 crc kubenswrapper[4840]: I1209 17:11:40.055640 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:40 crc kubenswrapper[4840]: I1209 17:11:40.454160 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7c98c5c9b5-mz7mp" Dec 09 17:11:40 crc kubenswrapper[4840]: I1209 17:11:40.519676 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 17:11:50 crc kubenswrapper[4840]: I1209 17:11:50.383167 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-wbtkf" Dec 09 17:12:04 crc kubenswrapper[4840]: I1209 17:12:04.036404 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:12:04 crc kubenswrapper[4840]: I1209 17:12:04.037024 4840 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.487845 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg"] Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.489288 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.490907 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.504131 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg"] Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.591576 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-z8p7f" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" containerID="cri-o://ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e" gracePeriod=15 Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.653067 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.653134 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.653193 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh28k\" (UniqueName: \"kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.767747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.767823 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.767880 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh28k\" (UniqueName: \"kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.768673 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.768781 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.789154 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh28k\" (UniqueName: \"kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.808627 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.998580 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-z8p7f_2c88493e-2461-4e30-b7c9-803beb3fec3b/console/0.log" Dec 09 17:12:05 crc kubenswrapper[4840]: I1209 17:12:05.998650 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.181539 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.181916 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.182078 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.182141 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r7sv\" (UniqueName: \"kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.182194 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.182246 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.182342 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle\") pod \"2c88493e-2461-4e30-b7c9-803beb3fec3b\" (UID: \"2c88493e-2461-4e30-b7c9-803beb3fec3b\") " Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.183452 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.183617 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca" (OuterVolumeSpecName: "service-ca") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.183881 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config" (OuterVolumeSpecName: "console-config") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.184380 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.187443 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv" (OuterVolumeSpecName: "kube-api-access-6r7sv") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "kube-api-access-6r7sv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.187518 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.187739 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2c88493e-2461-4e30-b7c9-803beb3fec3b" (UID: "2c88493e-2461-4e30-b7c9-803beb3fec3b"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.284849 4840 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.284931 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r7sv\" (UniqueName: \"kubernetes.io/projected/2c88493e-2461-4e30-b7c9-803beb3fec3b-kube-api-access-6r7sv\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.284961 4840 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.285024 4840 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.285050 4840 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.285075 4840 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2c88493e-2461-4e30-b7c9-803beb3fec3b-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.285099 4840 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c88493e-2461-4e30-b7c9-803beb3fec3b-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.305339 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg"] Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.635638 4840 generic.go:334] "Generic (PLEG): container finished" podID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerID="84cea2ef8341cac00d8d4d37365c1dca46f1fadc5ac3f4c2dcdada91f4657c01" exitCode=0 Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.635761 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" event={"ID":"9310120d-a137-4eed-aaf9-e0d4dc85376b","Type":"ContainerDied","Data":"84cea2ef8341cac00d8d4d37365c1dca46f1fadc5ac3f4c2dcdada91f4657c01"} Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.637045 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" event={"ID":"9310120d-a137-4eed-aaf9-e0d4dc85376b","Type":"ContainerStarted","Data":"d286492ac99e48c05c28e452ab2ffa4a2e71cc0a8e8b5c25f274e26b5a5fd98f"} Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641298 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-z8p7f_2c88493e-2461-4e30-b7c9-803beb3fec3b/console/0.log" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641369 4840 generic.go:334] "Generic (PLEG): container finished" podID="2c88493e-2461-4e30-b7c9-803beb3fec3b" 
containerID="ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e" exitCode=2 Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641409 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z8p7f" event={"ID":"2c88493e-2461-4e30-b7c9-803beb3fec3b","Type":"ContainerDied","Data":"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e"} Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641448 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z8p7f" event={"ID":"2c88493e-2461-4e30-b7c9-803beb3fec3b","Type":"ContainerDied","Data":"e5fb0cad3edba8f11e30c7736ba7f8ccced9f3bb82548b7763e6b886c7591988"} Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641480 4840 scope.go:117] "RemoveContainer" containerID="ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.641634 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z8p7f" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.676267 4840 scope.go:117] "RemoveContainer" containerID="ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.676790 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 17:12:06 crc kubenswrapper[4840]: E1209 17:12:06.676834 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e\": container with ID starting with ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e not found: ID does not exist" containerID="ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.676883 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e"} err="failed to get container status \"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e\": rpc error: code = NotFound desc = could not find container \"ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e\": container with ID starting with ae8e22f0da1722c3d6c813d2bc36c8e73d3719fe50a9d34327953ed464b3998e not found: ID does not exist" Dec 09 17:12:06 crc kubenswrapper[4840]: I1209 17:12:06.682352 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-z8p7f"] Dec 09 17:12:06 crc kubenswrapper[4840]: E1209 17:12:06.687058 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c88493e_2461_4e30_b7c9_803beb3fec3b.slice\": RecentStats: unable to find data in memory cache]" Dec 09 17:12:08 crc kubenswrapper[4840]: I1209 17:12:08.620189 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" path="/var/lib/kubelet/pods/2c88493e-2461-4e30-b7c9-803beb3fec3b/volumes" Dec 09 17:12:09 crc kubenswrapper[4840]: I1209 17:12:09.666354 4840 generic.go:334] "Generic (PLEG): container finished" podID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerID="6d4d5b15be30d2b66033ab4e50b6686728415ff4b7f188750de079e6f399c650" exitCode=0 Dec 09 17:12:09 crc kubenswrapper[4840]: I1209 
17:12:09.666445 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" event={"ID":"9310120d-a137-4eed-aaf9-e0d4dc85376b","Type":"ContainerDied","Data":"6d4d5b15be30d2b66033ab4e50b6686728415ff4b7f188750de079e6f399c650"} Dec 09 17:12:10 crc kubenswrapper[4840]: I1209 17:12:10.681015 4840 generic.go:334] "Generic (PLEG): container finished" podID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerID="9808e7f029ef2429146cf0d857c350ad316622eacb0a194e50e0df0047859352" exitCode=0 Dec 09 17:12:10 crc kubenswrapper[4840]: I1209 17:12:10.681420 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" event={"ID":"9310120d-a137-4eed-aaf9-e0d4dc85376b","Type":"ContainerDied","Data":"9808e7f029ef2429146cf0d857c350ad316622eacb0a194e50e0df0047859352"} Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.006579 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.166223 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util\") pod \"9310120d-a137-4eed-aaf9-e0d4dc85376b\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.166570 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh28k\" (UniqueName: \"kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k\") pod \"9310120d-a137-4eed-aaf9-e0d4dc85376b\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.166685 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle\") pod \"9310120d-a137-4eed-aaf9-e0d4dc85376b\" (UID: \"9310120d-a137-4eed-aaf9-e0d4dc85376b\") " Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.168108 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle" (OuterVolumeSpecName: "bundle") pod "9310120d-a137-4eed-aaf9-e0d4dc85376b" (UID: "9310120d-a137-4eed-aaf9-e0d4dc85376b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.176679 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k" (OuterVolumeSpecName: "kube-api-access-vh28k") pod "9310120d-a137-4eed-aaf9-e0d4dc85376b" (UID: "9310120d-a137-4eed-aaf9-e0d4dc85376b"). InnerVolumeSpecName "kube-api-access-vh28k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.197081 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util" (OuterVolumeSpecName: "util") pod "9310120d-a137-4eed-aaf9-e0d4dc85376b" (UID: "9310120d-a137-4eed-aaf9-e0d4dc85376b"). InnerVolumeSpecName "util". 
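[editor's note] The DELETE path above shows a benign race: "RemoveContainer" fires twice for the console container, and the second status lookup fails with NotFound, which the kubelet logs and tolerates because the cleanup goal is already met. A sketch of that idempotent-removal pattern, with a fake runtime standing in for CRI (names are illustrative):

    package main

    import (
            "errors"
            "fmt"
    )

    var errNotFound = errors.New("container not found: ID does not exist")

    // fakeRuntime stands in for the container runtime in this sketch.
    type fakeRuntime struct{ containers map[string]bool }

    func (r *fakeRuntime) remove(id string) error {
            if !r.containers[id] {
                    return errNotFound
            }
            delete(r.containers, id)
            return nil
    }

    // removeContainer is idempotent: a missing container means the work is
    // already done, so NotFound is logged and swallowed rather than retried.
    func removeContainer(r *fakeRuntime, id string) {
            if err := r.remove(id); err != nil {
                    if errors.Is(err, errNotFound) {
                            fmt.Printf("DeleteContainer returned error (already gone): %v\n", err)
                            return
                    }
                    fmt.Printf("DeleteContainer failed: %v\n", err)
            }
    }

    func main() {
            rt := &fakeRuntime{containers: map[string]bool{"ae8e22f0": true}}
            removeContainer(rt, "ae8e22f0") // first call removes it
            removeContainer(rt, "ae8e22f0") // second call hits NotFound, tolerated
    }
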
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.268563 4840 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.268594 4840 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9310120d-a137-4eed-aaf9-e0d4dc85376b-util\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.268603 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh28k\" (UniqueName: \"kubernetes.io/projected/9310120d-a137-4eed-aaf9-e0d4dc85376b-kube-api-access-vh28k\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.697454 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" event={"ID":"9310120d-a137-4eed-aaf9-e0d4dc85376b","Type":"ContainerDied","Data":"d286492ac99e48c05c28e452ab2ffa4a2e71cc0a8e8b5c25f274e26b5a5fd98f"} Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.697531 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d286492ac99e48c05c28e452ab2ffa4a2e71cc0a8e8b5c25f274e26b5a5fd98f" Dec 09 17:12:12 crc kubenswrapper[4840]: I1209 17:12:12.697531 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.039551 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"] Dec 09 17:12:18 crc kubenswrapper[4840]: E1209 17:12:18.040465 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="extract" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.040485 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="extract" Dec 09 17:12:18 crc kubenswrapper[4840]: E1209 17:12:18.040498 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="pull" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.040506 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="pull" Dec 09 17:12:18 crc kubenswrapper[4840]: E1209 17:12:18.040523 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="util" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.040531 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9310120d-a137-4eed-aaf9-e0d4dc85376b" containerName="util" Dec 09 17:12:18 crc kubenswrapper[4840]: E1209 17:12:18.040546 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.040554 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.040703 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c88493e-2461-4e30-b7c9-803beb3fec3b" containerName="console" Dec 09 17:12:18 crc 
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.042999 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.049462 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"]
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.142462 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.142533 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.142722 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97trj\" (UniqueName: \"kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.243905 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.244008 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.244047 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97trj\" (UniqueName: \"kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.244784 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.244817 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.262984 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97trj\" (UniqueName: \"kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj\") pod \"redhat-marketplace-nbnvx\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.365178 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nbnvx"
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.648477 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"]
Dec 09 17:12:18 crc kubenswrapper[4840]: I1209 17:12:18.736914 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerStarted","Data":"0ef0464d512cf9806a3c0e20decf366ce683f3f0d0768ba26bd6886aabdead11"}
Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.745880 4840 generic.go:334] "Generic (PLEG): container finished" podID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerID="fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f" exitCode=0
Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.745933 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerDied","Data":"fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f"}
Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.839578 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vsp52"]
Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.841297 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vsp52"
Need to start a new one" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.851932 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsp52"] Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.965374 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.965439 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdfvw\" (UniqueName: \"kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:19 crc kubenswrapper[4840]: I1209 17:12:19.965500 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.066283 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdfvw\" (UniqueName: \"kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.066367 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.066402 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.066818 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.067377 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.078593 4840 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz"] Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.079293 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.083348 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.083522 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-pwbq7" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.083525 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.083659 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.083782 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.096666 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdfvw\" (UniqueName: \"kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw\") pod \"certified-operators-vsp52\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.101093 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz"] Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.160723 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.167535 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-apiservice-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.167651 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-745pn\" (UniqueName: \"kubernetes.io/projected/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-kube-api-access-745pn\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.167677 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-webhook-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.268704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-apiservice-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.269033 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-745pn\" (UniqueName: \"kubernetes.io/projected/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-kube-api-access-745pn\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.269056 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-webhook-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.272636 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-webhook-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.288911 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-apiservice-cert\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: 
\"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.313652 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-745pn\" (UniqueName: \"kubernetes.io/projected/c3eb49b5-e79c-4d7f-8395-18217dbcc4a9-kube-api-access-745pn\") pod \"metallb-operator-controller-manager-5fff64b888-svqsz\" (UID: \"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9\") " pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.428642 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.500212 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz"] Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.500918 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.503407 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.503630 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.503754 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-xlxtc" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.558009 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz"] Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.673523 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-webhook-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.673581 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-apiservice-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.673617 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8g66\" (UniqueName: \"kubernetes.io/projected/8c103b24-d2e6-413d-a074-60609d33c8fd-kube-api-access-x8g66\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.703894 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsp52"] Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.771567 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerStarted","Data":"e7c4b1a3d83d71cc2ff8188d2668c7f6d7ad6ebe10a94afca4fce786a71a58ea"} Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.774356 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8g66\" (UniqueName: \"kubernetes.io/projected/8c103b24-d2e6-413d-a074-60609d33c8fd-kube-api-access-x8g66\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.774449 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-webhook-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.774477 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-apiservice-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.781545 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-webhook-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.787741 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8c103b24-d2e6-413d-a074-60609d33c8fd-apiservice-cert\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.790827 4840 generic.go:334] "Generic (PLEG): container finished" podID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerID="7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22" exitCode=0 Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.790876 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerDied","Data":"7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22"} Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.801346 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8g66\" (UniqueName: \"kubernetes.io/projected/8c103b24-d2e6-413d-a074-60609d33c8fd-kube-api-access-x8g66\") pod \"metallb-operator-webhook-server-f9dc74597-72bpz\" (UID: \"8c103b24-d2e6-413d-a074-60609d33c8fd\") " pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.812546 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:20 crc kubenswrapper[4840]: I1209 17:12:20.974393 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz"] Dec 09 17:12:21 crc kubenswrapper[4840]: W1209 17:12:21.005699 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3eb49b5_e79c_4d7f_8395_18217dbcc4a9.slice/crio-4e96651a9b6a554d0e8ebd53a7add30e07bab02c5494d74fdef75d7e7020d00b WatchSource:0}: Error finding container 4e96651a9b6a554d0e8ebd53a7add30e07bab02c5494d74fdef75d7e7020d00b: Status 404 returned error can't find the container with id 4e96651a9b6a554d0e8ebd53a7add30e07bab02c5494d74fdef75d7e7020d00b Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.241350 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz"] Dec 09 17:12:21 crc kubenswrapper[4840]: W1209 17:12:21.248275 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c103b24_d2e6_413d_a074_60609d33c8fd.slice/crio-d86774ab0e2fc08641b2353c6a69654131b91b6af1c2a4a3131113141aa1b09c WatchSource:0}: Error finding container d86774ab0e2fc08641b2353c6a69654131b91b6af1c2a4a3131113141aa1b09c: Status 404 returned error can't find the container with id d86774ab0e2fc08641b2353c6a69654131b91b6af1c2a4a3131113141aa1b09c Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.800932 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" event={"ID":"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9","Type":"ContainerStarted","Data":"4e96651a9b6a554d0e8ebd53a7add30e07bab02c5494d74fdef75d7e7020d00b"} Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.803265 4840 generic.go:334] "Generic (PLEG): container finished" podID="57182b06-26ff-496b-9289-665447e61e19" containerID="5966b6eaa66c5c7e1acc9e798e08c674647d6c781ff79493ca88a8a4027b1bce" exitCode=0 Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.803352 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerDied","Data":"5966b6eaa66c5c7e1acc9e798e08c674647d6c781ff79493ca88a8a4027b1bce"} Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.805585 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" event={"ID":"8c103b24-d2e6-413d-a074-60609d33c8fd","Type":"ContainerStarted","Data":"d86774ab0e2fc08641b2353c6a69654131b91b6af1c2a4a3131113141aa1b09c"} Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.808953 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerStarted","Data":"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421"} Dec 09 17:12:21 crc kubenswrapper[4840]: I1209 17:12:21.850758 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nbnvx" podStartSLOduration=2.3241428920000002 podStartE2EDuration="3.850739857s" podCreationTimestamp="2025-12-09 17:12:18 +0000 UTC" firstStartedPulling="2025-12-09 17:12:19.74749985 +0000 UTC m=+925.738610483" 
lastFinishedPulling="2025-12-09 17:12:21.274096815 +0000 UTC m=+927.265207448" observedRunningTime="2025-12-09 17:12:21.850179701 +0000 UTC m=+927.841290364" watchObservedRunningTime="2025-12-09 17:12:21.850739857 +0000 UTC m=+927.841850500" Dec 09 17:12:22 crc kubenswrapper[4840]: I1209 17:12:22.842074 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerStarted","Data":"ace7460cefbae64b1d2c82f148a128685e50f009fc07432491f5e75e3b75c3b2"} Dec 09 17:12:23 crc kubenswrapper[4840]: I1209 17:12:23.859091 4840 generic.go:334] "Generic (PLEG): container finished" podID="57182b06-26ff-496b-9289-665447e61e19" containerID="ace7460cefbae64b1d2c82f148a128685e50f009fc07432491f5e75e3b75c3b2" exitCode=0 Dec 09 17:12:23 crc kubenswrapper[4840]: I1209 17:12:23.859159 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerDied","Data":"ace7460cefbae64b1d2c82f148a128685e50f009fc07432491f5e75e3b75c3b2"} Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.899382 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerStarted","Data":"3b15708ee0d694e419fa19957ab400437faa770eb22e75419ac3a68be28df292"} Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.902364 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" event={"ID":"8c103b24-d2e6-413d-a074-60609d33c8fd","Type":"ContainerStarted","Data":"9312983f95d40aa92d70c3a9570671f7a58826c2ac5f40a4584e91347a2b9439"} Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.902462 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.904027 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" event={"ID":"c3eb49b5-e79c-4d7f-8395-18217dbcc4a9","Type":"ContainerStarted","Data":"b8b11496a4fc0ddd6bea2373574653700784f80aded5713c7f61efc7c59864f2"} Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.904487 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.920929 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vsp52" podStartSLOduration=3.117038102 podStartE2EDuration="8.920910086s" podCreationTimestamp="2025-12-09 17:12:19 +0000 UTC" firstStartedPulling="2025-12-09 17:12:21.805548901 +0000 UTC m=+927.796659534" lastFinishedPulling="2025-12-09 17:12:27.609420885 +0000 UTC m=+933.600531518" observedRunningTime="2025-12-09 17:12:27.916371004 +0000 UTC m=+933.907481657" watchObservedRunningTime="2025-12-09 17:12:27.920910086 +0000 UTC m=+933.912020729" Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.946539 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" podStartSLOduration=1.346283788 podStartE2EDuration="7.946515742s" podCreationTimestamp="2025-12-09 17:12:20 +0000 UTC" firstStartedPulling="2025-12-09 
17:12:21.009071128 +0000 UTC m=+927.000181761" lastFinishedPulling="2025-12-09 17:12:27.609303082 +0000 UTC m=+933.600413715" observedRunningTime="2025-12-09 17:12:27.944458262 +0000 UTC m=+933.935568895" watchObservedRunningTime="2025-12-09 17:12:27.946515742 +0000 UTC m=+933.937626375" Dec 09 17:12:27 crc kubenswrapper[4840]: I1209 17:12:27.966774 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" podStartSLOduration=1.5956968310000001 podStartE2EDuration="7.966756011s" podCreationTimestamp="2025-12-09 17:12:20 +0000 UTC" firstStartedPulling="2025-12-09 17:12:21.254059161 +0000 UTC m=+927.245169795" lastFinishedPulling="2025-12-09 17:12:27.625118342 +0000 UTC m=+933.616228975" observedRunningTime="2025-12-09 17:12:27.963417964 +0000 UTC m=+933.954528607" watchObservedRunningTime="2025-12-09 17:12:27.966756011 +0000 UTC m=+933.957866654" Dec 09 17:12:28 crc kubenswrapper[4840]: I1209 17:12:28.365533 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:28 crc kubenswrapper[4840]: I1209 17:12:28.365589 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:28 crc kubenswrapper[4840]: I1209 17:12:28.405139 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:28 crc kubenswrapper[4840]: I1209 17:12:28.945183 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.038730 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.040341 4840 util.go:30] "No sandbox for pod can be found. 
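[editor's note] The pod_startup_latency_tracker lines encode two durations, and the relationship is checkable from the values logged above: podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp, and podStartSLOduration = E2E minus image-pull time (lastFinishedPulling - firstStartedPulling). For redhat-marketplace-nbnvx: 3.850739857s end to end, minus 1.526596965s of pulling, gives the reported 2.324142892s. A check of that arithmetic in Go (the relationship is inferred from these log values, not quoted from kubelet source):

    package main

    import (
            "fmt"
            "time"
    )

    func mustParse(s string) time.Time {
            // Layout matches the timestamps in the log; fractional seconds optional.
            t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
            if err != nil {
                    panic(err)
            }
            return t
    }

    func main() {
            created := mustParse("2025-12-09 17:12:18 +0000 UTC")
            firstPull := mustParse("2025-12-09 17:12:19.74749985 +0000 UTC")
            lastPull := mustParse("2025-12-09 17:12:21.274096815 +0000 UTC")
            running := mustParse("2025-12-09 17:12:21.850739857 +0000 UTC")

            e2e := running.Sub(created)
            slo := e2e - lastPull.Sub(firstPull) // E2E with image-pull time excluded
            fmt.Println(e2e, slo)                // 3.850739857s 2.324142892s
    }
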
Need to start a new one" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.052795 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.208179 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.208253 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.208286 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzsds\" (UniqueName: \"kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.309530 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzsds\" (UniqueName: \"kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.309626 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.309672 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.310085 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.310404 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.328778 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vzsds\" (UniqueName: \"kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds\") pod \"community-operators-58zsw\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.356223 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.604471 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:29 crc kubenswrapper[4840]: W1209 17:12:29.607422 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59e87d58_ff42_4c17_88f8_70aba10ea8bd.slice/crio-d25060263545141c794c3aa97a24e041ca4014e719f9f02224f1a9fb3d455edf WatchSource:0}: Error finding container d25060263545141c794c3aa97a24e041ca4014e719f9f02224f1a9fb3d455edf: Status 404 returned error can't find the container with id d25060263545141c794c3aa97a24e041ca4014e719f9f02224f1a9fb3d455edf Dec 09 17:12:29 crc kubenswrapper[4840]: I1209 17:12:29.915245 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerStarted","Data":"d25060263545141c794c3aa97a24e041ca4014e719f9f02224f1a9fb3d455edf"} Dec 09 17:12:30 crc kubenswrapper[4840]: I1209 17:12:30.161128 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:30 crc kubenswrapper[4840]: I1209 17:12:30.161201 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:30 crc kubenswrapper[4840]: I1209 17:12:30.204446 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:30 crc kubenswrapper[4840]: I1209 17:12:30.923897 4840 generic.go:334] "Generic (PLEG): container finished" podID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerID="6258ff467436dc4e2ab866230024a2d44bdd903a05c981d0efe9b8f5412d33d8" exitCode=0 Dec 09 17:12:30 crc kubenswrapper[4840]: I1209 17:12:30.924004 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerDied","Data":"6258ff467436dc4e2ab866230024a2d44bdd903a05c981d0efe9b8f5412d33d8"} Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.434377 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"] Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.435052 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nbnvx" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="registry-server" containerID="cri-o://dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421" gracePeriod=2 Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.815187 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.934773 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerStarted","Data":"a4a73b420263ed20982e3fc28c2114dd415599fb626f2bf78dc9795093b6a964"} Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.936922 4840 generic.go:334] "Generic (PLEG): container finished" podID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerID="dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421" exitCode=0 Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.936994 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerDied","Data":"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421"} Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.937028 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nbnvx" event={"ID":"6b76aa4d-818f-4e97-86cb-d24b84829dd7","Type":"ContainerDied","Data":"0ef0464d512cf9806a3c0e20decf366ce683f3f0d0768ba26bd6886aabdead11"} Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.937049 4840 scope.go:117] "RemoveContainer" containerID="dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.937177 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nbnvx" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.948846 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content\") pod \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.949234 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities\") pod \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.953679 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities" (OuterVolumeSpecName: "utilities") pod "6b76aa4d-818f-4e97-86cb-d24b84829dd7" (UID: "6b76aa4d-818f-4e97-86cb-d24b84829dd7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.953844 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97trj\" (UniqueName: \"kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj\") pod \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\" (UID: \"6b76aa4d-818f-4e97-86cb-d24b84829dd7\") " Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.954245 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.955627 4840 scope.go:117] "RemoveContainer" containerID="7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.986192 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj" (OuterVolumeSpecName: "kube-api-access-97trj") pod "6b76aa4d-818f-4e97-86cb-d24b84829dd7" (UID: "6b76aa4d-818f-4e97-86cb-d24b84829dd7"). InnerVolumeSpecName "kube-api-access-97trj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:12:31 crc kubenswrapper[4840]: I1209 17:12:31.987274 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b76aa4d-818f-4e97-86cb-d24b84829dd7" (UID: "6b76aa4d-818f-4e97-86cb-d24b84829dd7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.000824 4840 scope.go:117] "RemoveContainer" containerID="fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.014927 4840 scope.go:117] "RemoveContainer" containerID="dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421" Dec 09 17:12:32 crc kubenswrapper[4840]: E1209 17:12:32.015461 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421\": container with ID starting with dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421 not found: ID does not exist" containerID="dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.015526 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421"} err="failed to get container status \"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421\": rpc error: code = NotFound desc = could not find container \"dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421\": container with ID starting with dd216e8631a7dacb698e24cb67b46744bc7e3847f83467eb0130698df4cac421 not found: ID does not exist" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.015555 4840 scope.go:117] "RemoveContainer" containerID="7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22" Dec 09 17:12:32 crc kubenswrapper[4840]: E1209 17:12:32.015867 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22\": container with ID starting with 7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22 not found: ID does not exist" containerID="7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.015898 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22"} err="failed to get container status \"7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22\": rpc error: code = NotFound desc = could not find container \"7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22\": container with ID starting with 7c1d61d4aab062a04cf6953fee383f2008922ee278a46315cc1770be69a56d22 not found: ID does not exist" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.015924 4840 scope.go:117] "RemoveContainer" containerID="fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f" Dec 09 17:12:32 crc kubenswrapper[4840]: E1209 17:12:32.016186 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f\": container with ID starting with fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f not found: ID does not exist" containerID="fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.016213 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f"} err="failed to get container status \"fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f\": rpc error: code = NotFound desc = could not find container \"fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f\": container with ID starting with fa179c75419d2ede367fc60cd880916fead52f2e22a416e70542c40956a18a7f not found: ID does not exist" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.055489 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97trj\" (UniqueName: \"kubernetes.io/projected/6b76aa4d-818f-4e97-86cb-d24b84829dd7-kube-api-access-97trj\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.055523 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b76aa4d-818f-4e97-86cb-d24b84829dd7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.267509 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"] Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.272309 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nbnvx"] Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.615163 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" path="/var/lib/kubelet/pods/6b76aa4d-818f-4e97-86cb-d24b84829dd7/volumes" Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 17:12:32.943890 4840 generic.go:334] "Generic (PLEG): container finished" podID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerID="a4a73b420263ed20982e3fc28c2114dd415599fb626f2bf78dc9795093b6a964" exitCode=0 Dec 09 17:12:32 crc kubenswrapper[4840]: I1209 
17:12:32.943984 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerDied","Data":"a4a73b420263ed20982e3fc28c2114dd415599fb626f2bf78dc9795093b6a964"} Dec 09 17:12:33 crc kubenswrapper[4840]: I1209 17:12:33.956064 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerStarted","Data":"5a7a3994684b8e340ebe2b99d5f1a4a411559df61f736a50695a61c381552e0b"} Dec 09 17:12:33 crc kubenswrapper[4840]: I1209 17:12:33.982895 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-58zsw" podStartSLOduration=2.489240868 podStartE2EDuration="4.982869654s" podCreationTimestamp="2025-12-09 17:12:29 +0000 UTC" firstStartedPulling="2025-12-09 17:12:30.925691057 +0000 UTC m=+936.916801690" lastFinishedPulling="2025-12-09 17:12:33.419319843 +0000 UTC m=+939.410430476" observedRunningTime="2025-12-09 17:12:33.976739526 +0000 UTC m=+939.967850209" watchObservedRunningTime="2025-12-09 17:12:33.982869654 +0000 UTC m=+939.973980287" Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.036095 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.036164 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.036213 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.036827 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.036880 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b" gracePeriod=600 Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.963598 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b" exitCode=0 Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.964408 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" 
event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b"} Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.964432 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f"} Dec 09 17:12:34 crc kubenswrapper[4840]: I1209 17:12:34.964448 4840 scope.go:117] "RemoveContainer" containerID="0f2e3a684de848e9b5fa655a4ae5fc5ca866cec15401f4140baa9862458b991d" Dec 09 17:12:39 crc kubenswrapper[4840]: I1209 17:12:39.357023 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:39 crc kubenswrapper[4840]: I1209 17:12:39.357525 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:39 crc kubenswrapper[4840]: I1209 17:12:39.408906 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:40 crc kubenswrapper[4840]: I1209 17:12:40.031890 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:40 crc kubenswrapper[4840]: I1209 17:12:40.199090 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:40 crc kubenswrapper[4840]: I1209 17:12:40.817435 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-f9dc74597-72bpz" Dec 09 17:12:42 crc kubenswrapper[4840]: I1209 17:12:42.426523 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:42 crc kubenswrapper[4840]: I1209 17:12:42.426743 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-58zsw" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="registry-server" containerID="cri-o://5a7a3994684b8e340ebe2b99d5f1a4a411559df61f736a50695a61c381552e0b" gracePeriod=2 Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.028692 4840 generic.go:334] "Generic (PLEG): container finished" podID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerID="5a7a3994684b8e340ebe2b99d5f1a4a411559df61f736a50695a61c381552e0b" exitCode=0 Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.028743 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerDied","Data":"5a7a3994684b8e340ebe2b99d5f1a4a411559df61f736a50695a61c381552e0b"} Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.322586 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.407055 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzsds\" (UniqueName: \"kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds\") pod \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.407125 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities\") pod \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.407230 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content\") pod \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\" (UID: \"59e87d58-ff42-4c17-88f8-70aba10ea8bd\") " Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.407991 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities" (OuterVolumeSpecName: "utilities") pod "59e87d58-ff42-4c17-88f8-70aba10ea8bd" (UID: "59e87d58-ff42-4c17-88f8-70aba10ea8bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.417286 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds" (OuterVolumeSpecName: "kube-api-access-vzsds") pod "59e87d58-ff42-4c17-88f8-70aba10ea8bd" (UID: "59e87d58-ff42-4c17-88f8-70aba10ea8bd"). InnerVolumeSpecName "kube-api-access-vzsds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.464912 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59e87d58-ff42-4c17-88f8-70aba10ea8bd" (UID: "59e87d58-ff42-4c17-88f8-70aba10ea8bd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.508924 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.508961 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzsds\" (UniqueName: \"kubernetes.io/projected/59e87d58-ff42-4c17-88f8-70aba10ea8bd-kube-api-access-vzsds\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.508987 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59e87d58-ff42-4c17-88f8-70aba10ea8bd-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.830377 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsp52"] Dec 09 17:12:43 crc kubenswrapper[4840]: I1209 17:12:43.830956 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vsp52" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="registry-server" containerID="cri-o://3b15708ee0d694e419fa19957ab400437faa770eb22e75419ac3a68be28df292" gracePeriod=2 Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.038574 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58zsw" event={"ID":"59e87d58-ff42-4c17-88f8-70aba10ea8bd","Type":"ContainerDied","Data":"d25060263545141c794c3aa97a24e041ca4014e719f9f02224f1a9fb3d455edf"} Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.038636 4840 scope.go:117] "RemoveContainer" containerID="5a7a3994684b8e340ebe2b99d5f1a4a411559df61f736a50695a61c381552e0b" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.038767 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58zsw" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.043785 4840 generic.go:334] "Generic (PLEG): container finished" podID="57182b06-26ff-496b-9289-665447e61e19" containerID="3b15708ee0d694e419fa19957ab400437faa770eb22e75419ac3a68be28df292" exitCode=0 Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.043832 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerDied","Data":"3b15708ee0d694e419fa19957ab400437faa770eb22e75419ac3a68be28df292"} Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.070647 4840 scope.go:117] "RemoveContainer" containerID="a4a73b420263ed20982e3fc28c2114dd415599fb626f2bf78dc9795093b6a964" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.095226 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.109756 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-58zsw"] Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.117762 4840 scope.go:117] "RemoveContainer" containerID="6258ff467436dc4e2ab866230024a2d44bdd903a05c981d0efe9b8f5412d33d8" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.249023 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.319306 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities\") pod \"57182b06-26ff-496b-9289-665447e61e19\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.319423 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdfvw\" (UniqueName: \"kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw\") pod \"57182b06-26ff-496b-9289-665447e61e19\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.319455 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content\") pod \"57182b06-26ff-496b-9289-665447e61e19\" (UID: \"57182b06-26ff-496b-9289-665447e61e19\") " Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.320212 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities" (OuterVolumeSpecName: "utilities") pod "57182b06-26ff-496b-9289-665447e61e19" (UID: "57182b06-26ff-496b-9289-665447e61e19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.324073 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw" (OuterVolumeSpecName: "kube-api-access-xdfvw") pod "57182b06-26ff-496b-9289-665447e61e19" (UID: "57182b06-26ff-496b-9289-665447e61e19"). InnerVolumeSpecName "kube-api-access-xdfvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.368466 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57182b06-26ff-496b-9289-665447e61e19" (UID: "57182b06-26ff-496b-9289-665447e61e19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.420692 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.420735 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdfvw\" (UniqueName: \"kubernetes.io/projected/57182b06-26ff-496b-9289-665447e61e19-kube-api-access-xdfvw\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.420750 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57182b06-26ff-496b-9289-665447e61e19-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:12:44 crc kubenswrapper[4840]: I1209 17:12:44.616296 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" path="/var/lib/kubelet/pods/59e87d58-ff42-4c17-88f8-70aba10ea8bd/volumes" Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.053410 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsp52" event={"ID":"57182b06-26ff-496b-9289-665447e61e19","Type":"ContainerDied","Data":"e7c4b1a3d83d71cc2ff8188d2668c7f6d7ad6ebe10a94afca4fce786a71a58ea"} Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.053472 4840 scope.go:117] "RemoveContainer" containerID="3b15708ee0d694e419fa19957ab400437faa770eb22e75419ac3a68be28df292" Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.053582 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vsp52" Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.072107 4840 scope.go:117] "RemoveContainer" containerID="ace7460cefbae64b1d2c82f148a128685e50f009fc07432491f5e75e3b75c3b2" Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.076921 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsp52"] Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.082374 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vsp52"] Dec 09 17:12:45 crc kubenswrapper[4840]: I1209 17:12:45.091882 4840 scope.go:117] "RemoveContainer" containerID="5966b6eaa66c5c7e1acc9e798e08c674647d6c781ff79493ca88a8a4027b1bce" Dec 09 17:12:46 crc kubenswrapper[4840]: I1209 17:12:46.617247 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57182b06-26ff-496b-9289-665447e61e19" path="/var/lib/kubelet/pods/57182b06-26ff-496b-9289-665447e61e19/volumes" Dec 09 17:13:00 crc kubenswrapper[4840]: I1209 17:13:00.432937 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5fff64b888-svqsz" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.247798 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-h879g"] Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248075 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="extract-utilities" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248091 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="extract-utilities" Dec 09 17:13:01 crc 
kubenswrapper[4840]: E1209 17:13:01.248099 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248105 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248115 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248123 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248134 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248140 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248152 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248158 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248169 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248175 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248184 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="extract-utilities" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248192 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="extract-utilities" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248208 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248216 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="extract-content" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.248226 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="extract-utilities" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248236 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="extract-utilities" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248370 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="57182b06-26ff-496b-9289-665447e61e19" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248398 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="59e87d58-ff42-4c17-88f8-70aba10ea8bd" 
containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.248407 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b76aa4d-818f-4e97-86cb-d24b84829dd7" containerName="registry-server" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.250785 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.252874 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.253669 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.255147 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-f94f7" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.267316 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z"] Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.268232 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.277837 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.282398 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z"] Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.331193 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-9bdk4"] Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.334123 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.337409 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-vnp7l" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.341041 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.341089 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.341150 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.345574 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-6zzjs"] Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.346494 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.348540 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.357272 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-6zzjs"] Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366459 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-sockets\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366516 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-conf\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366560 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366581 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm5rc\" (UniqueName: \"kubernetes.io/projected/eb375d89-155d-44fb-ad5b-f9cca1276898-kube-api-access-gm5rc\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366606 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366623 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366701 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-startup\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366755 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-reloader\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.366825 4840 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp2t4\" (UniqueName: \"kubernetes.io/projected/b871bd33-2669-454b-80ce-fd914f836d1d-kube-api-access-vp2t4\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468163 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlcjc\" (UniqueName: \"kubernetes.io/projected/35e4931f-7227-403e-aaf9-2426fdef84d8-kube-api-access-wlcjc\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468211 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-conf\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468236 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468260 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm5rc\" (UniqueName: \"kubernetes.io/projected/eb375d89-155d-44fb-ad5b-f9cca1276898-kube-api-access-gm5rc\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468283 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468300 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468322 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-startup\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468339 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-reloader\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468362 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468381 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.468379 4840 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468397 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-metrics-certs\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.468477 4840 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.468547 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs podName:eb375d89-155d-44fb-ad5b-f9cca1276898 nodeName:}" failed. No retries permitted until 2025-12-09 17:13:01.968524728 +0000 UTC m=+967.959635361 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs") pod "frr-k8s-h879g" (UID: "eb375d89-155d-44fb-ad5b-f9cca1276898") : secret "frr-k8s-certs-secret" not found Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.468569 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert podName:b871bd33-2669-454b-80ce-fd914f836d1d nodeName:}" failed. No retries permitted until 2025-12-09 17:13:01.968558399 +0000 UTC m=+967.959669172 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert") pod "frr-k8s-webhook-server-7fcb986d4-n7s2z" (UID: "b871bd33-2669-454b-80ce-fd914f836d1d") : secret "frr-k8s-webhook-server-cert" not found Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468601 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-conf\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468672 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzpr6\" (UniqueName: \"kubernetes.io/projected/26db22e3-364b-4813-878c-ef0d99a342e8-kube-api-access-fzpr6\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468695 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp2t4\" (UniqueName: \"kubernetes.io/projected/b871bd33-2669-454b-80ce-fd914f836d1d-kube-api-access-vp2t4\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468709 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-cert\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468727 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/35e4931f-7227-403e-aaf9-2426fdef84d8-metallb-excludel2\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468743 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-sockets\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468746 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.468828 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-reloader\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.469045 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-sockets\") pod \"frr-k8s-h879g\" (UID: 
\"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.469645 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/eb375d89-155d-44fb-ad5b-f9cca1276898-frr-startup\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.495875 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm5rc\" (UniqueName: \"kubernetes.io/projected/eb375d89-155d-44fb-ad5b-f9cca1276898-kube-api-access-gm5rc\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.495875 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp2t4\" (UniqueName: \"kubernetes.io/projected/b871bd33-2669-454b-80ce-fd914f836d1d-kube-api-access-vp2t4\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570408 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570460 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570480 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-metrics-certs\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570524 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzpr6\" (UniqueName: \"kubernetes.io/projected/26db22e3-364b-4813-878c-ef0d99a342e8-kube-api-access-fzpr6\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570543 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-cert\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570561 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/35e4931f-7227-403e-aaf9-2426fdef84d8-metallb-excludel2\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.570581 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wlcjc\" (UniqueName: \"kubernetes.io/projected/35e4931f-7227-403e-aaf9-2426fdef84d8-kube-api-access-wlcjc\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.570582 4840 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.570655 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs podName:26db22e3-364b-4813-878c-ef0d99a342e8 nodeName:}" failed. No retries permitted until 2025-12-09 17:13:02.070636982 +0000 UTC m=+968.061747615 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs") pod "controller-f8648f98b-6zzjs" (UID: "26db22e3-364b-4813-878c-ef0d99a342e8") : secret "controller-certs-secret" not found Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.570868 4840 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 09 17:13:01 crc kubenswrapper[4840]: E1209 17:13:01.570910 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist podName:35e4931f-7227-403e-aaf9-2426fdef84d8 nodeName:}" failed. No retries permitted until 2025-12-09 17:13:02.07089666 +0000 UTC m=+968.062007293 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist") pod "speaker-9bdk4" (UID: "35e4931f-7227-403e-aaf9-2426fdef84d8") : secret "metallb-memberlist" not found Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.571528 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/35e4931f-7227-403e-aaf9-2426fdef84d8-metallb-excludel2\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.572904 4840 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.574452 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-metrics-certs\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.584549 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-cert\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.587357 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlcjc\" (UniqueName: \"kubernetes.io/projected/35e4931f-7227-403e-aaf9-2426fdef84d8-kube-api-access-wlcjc\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:01 crc 
kubenswrapper[4840]: I1209 17:13:01.592992 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzpr6\" (UniqueName: \"kubernetes.io/projected/26db22e3-364b-4813-878c-ef0d99a342e8-kube-api-access-fzpr6\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.976180 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.976250 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.979344 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb375d89-155d-44fb-ad5b-f9cca1276898-metrics-certs\") pod \"frr-k8s-h879g\" (UID: \"eb375d89-155d-44fb-ad5b-f9cca1276898\") " pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:01 crc kubenswrapper[4840]: I1209 17:13:01.979696 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b871bd33-2669-454b-80ce-fd914f836d1d-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-n7s2z\" (UID: \"b871bd33-2669-454b-80ce-fd914f836d1d\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.077387 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.077445 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:02 crc kubenswrapper[4840]: E1209 17:13:02.077592 4840 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 09 17:13:02 crc kubenswrapper[4840]: E1209 17:13:02.077652 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist podName:35e4931f-7227-403e-aaf9-2426fdef84d8 nodeName:}" failed. No retries permitted until 2025-12-09 17:13:03.077636596 +0000 UTC m=+969.068747239 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist") pod "speaker-9bdk4" (UID: "35e4931f-7227-403e-aaf9-2426fdef84d8") : secret "metallb-memberlist" not found Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.080775 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/26db22e3-364b-4813-878c-ef0d99a342e8-metrics-certs\") pod \"controller-f8648f98b-6zzjs\" (UID: \"26db22e3-364b-4813-878c-ef0d99a342e8\") " pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.166272 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-h879g" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.184232 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.264676 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-6zzjs" Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.674140 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z"] Dec 09 17:13:02 crc kubenswrapper[4840]: W1209 17:13:02.682065 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb871bd33_2669_454b_80ce_fd914f836d1d.slice/crio-9bc638aaaa1d38101d2c178c4ff9f20d7ed7b69b975043b5b9292b00b662b202 WatchSource:0}: Error finding container 9bc638aaaa1d38101d2c178c4ff9f20d7ed7b69b975043b5b9292b00b662b202: Status 404 returned error can't find the container with id 9bc638aaaa1d38101d2c178c4ff9f20d7ed7b69b975043b5b9292b00b662b202 Dec 09 17:13:02 crc kubenswrapper[4840]: I1209 17:13:02.738577 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-6zzjs"] Dec 09 17:13:02 crc kubenswrapper[4840]: W1209 17:13:02.741166 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26db22e3_364b_4813_878c_ef0d99a342e8.slice/crio-09816ca62fabb24118be8caf91deee5177284a8524fcbec4515bc0f6b59fbe0b WatchSource:0}: Error finding container 09816ca62fabb24118be8caf91deee5177284a8524fcbec4515bc0f6b59fbe0b: Status 404 returned error can't find the container with id 09816ca62fabb24118be8caf91deee5177284a8524fcbec4515bc0f6b59fbe0b Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.096295 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.102459 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/35e4931f-7227-403e-aaf9-2426fdef84d8-memberlist\") pod \"speaker-9bdk4\" (UID: \"35e4931f-7227-403e-aaf9-2426fdef84d8\") " pod="metallb-system/speaker-9bdk4" Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.153994 4840 util.go:30] "No sandbox for pod can be found. 
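[Editor's note: the two SetUp failures for the "memberlist" volume above back off on a doubling schedule: the first failure sets durationBeforeRetry to 500ms, the second to 1s, and the mount succeeds at 17:13:03.102459 once the metallb-memberlist secret exists. A minimal Go sketch of that retry cadence; the 2x multiplier matches the 500ms-to-1s step seen here, while the cap is an assumed placeholder, not taken from this log.]

    // Illustrative sketch only (not kubelet source): the retry delays visible
    // above -- 500ms, then 1s -- are consistent with an exponential backoff
    // that doubles the wait after each failed MountVolume attempt, up to a cap.
    package main

    import (
        "fmt"
        "time"
    )

    const (
        initialDelay = 500 * time.Millisecond // first "durationBeforeRetry" in the log
        maxDelay     = 2 * time.Minute        // assumed cap for this sketch
    )

    // nextDelay doubles the previous delay, clamped at maxDelay.
    func nextDelay(d time.Duration) time.Duration {
        d *= 2
        if d > maxDelay {
            return maxDelay
        }
        return d
    }

    func main() {
        for d := initialDelay; d < 5*time.Second; d = nextDelay(d) {
            fmt.Printf("no retries permitted for %v\n", d) // 500ms, 1s, 2s, 4s
        }
    }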
Dec 09 17:13:03 crc kubenswrapper[4840]: W1209 17:13:03.175167 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35e4931f_7227_403e_aaf9_2426fdef84d8.slice/crio-07f3d91451337dc004a7297e572bc03bd5aa29733495a5f62b2b796d34a56de1 WatchSource:0}: Error finding container 07f3d91451337dc004a7297e572bc03bd5aa29733495a5f62b2b796d34a56de1: Status 404 returned error can't find the container with id 07f3d91451337dc004a7297e572bc03bd5aa29733495a5f62b2b796d34a56de1
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.206944 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-6zzjs" event={"ID":"26db22e3-364b-4813-878c-ef0d99a342e8","Type":"ContainerStarted","Data":"e406295b8408b7a3c85b1249c9466869e756e31227f04387bd83301ddd9acc61"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.207229 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-6zzjs" event={"ID":"26db22e3-364b-4813-878c-ef0d99a342e8","Type":"ContainerStarted","Data":"35770ddf64d05d063cc81febae6d92dd36b2ba9241b8dc4e5235e4564eb7ceb0"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.207243 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-6zzjs"
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.207252 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-6zzjs" event={"ID":"26db22e3-364b-4813-878c-ef0d99a342e8","Type":"ContainerStarted","Data":"09816ca62fabb24118be8caf91deee5177284a8524fcbec4515bc0f6b59fbe0b"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.208209 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"3fb9897cf03fbfdb0faf0058849c26c9d84f2f4388d3be747ee0e3421979fee4"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.209565 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-9bdk4" event={"ID":"35e4931f-7227-403e-aaf9-2426fdef84d8","Type":"ContainerStarted","Data":"07f3d91451337dc004a7297e572bc03bd5aa29733495a5f62b2b796d34a56de1"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.210830 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" event={"ID":"b871bd33-2669-454b-80ce-fd914f836d1d","Type":"ContainerStarted","Data":"9bc638aaaa1d38101d2c178c4ff9f20d7ed7b69b975043b5b9292b00b662b202"}
Dec 09 17:13:03 crc kubenswrapper[4840]: I1209 17:13:03.225594 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-6zzjs" podStartSLOduration=2.225571525 podStartE2EDuration="2.225571525s" podCreationTimestamp="2025-12-09 17:13:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:13:03.224625208 +0000 UTC m=+969.215735841" watchObservedRunningTime="2025-12-09 17:13:03.225571525 +0000 UTC m=+969.216682178"
Dec 09 17:13:04 crc kubenswrapper[4840]: I1209 17:13:04.237769 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-9bdk4" event={"ID":"35e4931f-7227-403e-aaf9-2426fdef84d8","Type":"ContainerStarted","Data":"ce1a76fce097fc7274392ccc443b08f088ab7d19d6222d9aac42add9b68d2a56"}
Dec 09 17:13:04 crc kubenswrapper[4840]: I1209 17:13:04.237829 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-9bdk4" event={"ID":"35e4931f-7227-403e-aaf9-2426fdef84d8","Type":"ContainerStarted","Data":"1095b545d9231a038863056fa2653d573fbb046486b73c0542d1c7347131b0e9"}
Dec 09 17:13:04 crc kubenswrapper[4840]: I1209 17:13:04.237887 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-9bdk4"
Dec 09 17:13:04 crc kubenswrapper[4840]: I1209 17:13:04.272580 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-9bdk4" podStartSLOduration=3.272520723 podStartE2EDuration="3.272520723s" podCreationTimestamp="2025-12-09 17:13:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:13:04.255660892 +0000 UTC m=+970.246771525" watchObservedRunningTime="2025-12-09 17:13:04.272520723 +0000 UTC m=+970.263631356"
Dec 09 17:13:10 crc kubenswrapper[4840]: I1209 17:13:10.293665 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" event={"ID":"b871bd33-2669-454b-80ce-fd914f836d1d","Type":"ContainerStarted","Data":"b5d5559099f92b6395aba33af2fb45e14c0076061193ac0eef95c7bf17364171"}
Dec 09 17:13:10 crc kubenswrapper[4840]: I1209 17:13:10.294431 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z"
Dec 09 17:13:10 crc kubenswrapper[4840]: I1209 17:13:10.295616 4840 generic.go:334] "Generic (PLEG): container finished" podID="eb375d89-155d-44fb-ad5b-f9cca1276898" containerID="42dbf841eaa4fff42db442ecda62e1076f29c16362d56041505a8e0c21d4e3c3" exitCode=0
Dec 09 17:13:10 crc kubenswrapper[4840]: I1209 17:13:10.295658 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerDied","Data":"42dbf841eaa4fff42db442ecda62e1076f29c16362d56041505a8e0c21d4e3c3"}
Dec 09 17:13:10 crc kubenswrapper[4840]: I1209 17:13:10.318020 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z" podStartSLOduration=2.256340335 podStartE2EDuration="9.317995825s" podCreationTimestamp="2025-12-09 17:13:01 +0000 UTC" firstStartedPulling="2025-12-09 17:13:02.684732486 +0000 UTC m=+968.675843129" lastFinishedPulling="2025-12-09 17:13:09.746387976 +0000 UTC m=+975.737498619" observedRunningTime="2025-12-09 17:13:10.312150545 +0000 UTC m=+976.303261178" watchObservedRunningTime="2025-12-09 17:13:10.317995825 +0000 UTC m=+976.309106458"
Dec 09 17:13:11 crc kubenswrapper[4840]: I1209 17:13:11.306376 4840 generic.go:334] "Generic (PLEG): container finished" podID="eb375d89-155d-44fb-ad5b-f9cca1276898" containerID="4dc562b83aa2f6f3a0dafbc85904f74d0aa87308ff9e313db8069f3c4bed1f1d" exitCode=0
Dec 09 17:13:11 crc kubenswrapper[4840]: I1209 17:13:11.306449 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerDied","Data":"4dc562b83aa2f6f3a0dafbc85904f74d0aa87308ff9e313db8069f3c4bed1f1d"}
Dec 09 17:13:12 crc kubenswrapper[4840]: I1209 17:13:12.270876 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-6zzjs"
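[Editor's note: the "Observed pod startup duration" entries above decompose consistently as podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp, and podStartSLOduration = E2E duration minus the image-pull window (lastFinishedPulling - firstStartedPulling). For frr-k8s-webhook-server-7fcb986d4-n7s2z that is 9.317995825s - 7.061655490s = 2.256340335s; for pods that pulled nothing (zero pull timestamps), the two figures coincide, as with controller-f8648f98b-6zzjs. A small Go check of that arithmetic, using timestamps copied from the log entries above:]

    // Sketch of the bookkeeping behind "Observed pod startup duration": the
    // SLO figure is the end-to-end startup time minus the image-pull window.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s) // fractional seconds are accepted when parsing
            if err != nil {
                panic(err)
            }
            return t
        }

        created := parse("2025-12-09 17:13:01 +0000 UTC")
        firstPull := parse("2025-12-09 17:13:02.684732486 +0000 UTC")
        lastPull := parse("2025-12-09 17:13:09.746387976 +0000 UTC")
        running := parse("2025-12-09 17:13:10.317995825 +0000 UTC")

        e2e := running.Sub(created)          // 9.317995825s, the podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // 2.256340335s, the podStartSLOduration
        fmt.Println(e2e, slo)
    }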
Dec 09 17:13:12 crc kubenswrapper[4840]: I1209 17:13:12.316948 4840 generic.go:334] "Generic (PLEG): container finished" podID="eb375d89-155d-44fb-ad5b-f9cca1276898" containerID="5c503c6afcc01f30968be4824c57ca6fde813eacec62ae58b52b6e5fa6569919" exitCode=0
Dec 09 17:13:12 crc kubenswrapper[4840]: I1209 17:13:12.317006 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerDied","Data":"5c503c6afcc01f30968be4824c57ca6fde813eacec62ae58b52b6e5fa6569919"}
Dec 09 17:13:13 crc kubenswrapper[4840]: I1209 17:13:13.163066 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-9bdk4"
Dec 09 17:13:13 crc kubenswrapper[4840]: I1209 17:13:13.326803 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"9d528a4f8ec2cd669315885bc690b5cba8362353cd31eb1af4005d5a826f1c1a"}
Dec 09 17:13:13 crc kubenswrapper[4840]: I1209 17:13:13.326853 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"5d4af8f6c9d2a762fc2d089afbe609fe3e31b3e72940eb8841e53824888c51f1"}
Dec 09 17:13:13 crc kubenswrapper[4840]: I1209 17:13:13.326866 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"40f3b825b8fa0c3fce628dd9a20e66d27542d0c076ff40456198d8764f837fa0"}
Dec 09 17:13:13 crc kubenswrapper[4840]: I1209 17:13:13.326877 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"64959bb8a57f82100a156e67a5fc0883e379cdcf6eeb88385345556058760500"}
Dec 09 17:13:14 crc kubenswrapper[4840]: I1209 17:13:14.335243 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"81e75a8f0c7003d78be79cba994b59dc0540744d710c3bdc42d59ff8eba5ee05"}
Dec 09 17:13:15 crc kubenswrapper[4840]: I1209 17:13:15.344746 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-h879g" event={"ID":"eb375d89-155d-44fb-ad5b-f9cca1276898","Type":"ContainerStarted","Data":"4fc556a2aae05e21b56e9b66a5323a8aa0a176c47c656025710749ba308e5f18"}
Dec 09 17:13:15 crc kubenswrapper[4840]: I1209 17:13:15.345649 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-h879g"
Dec 09 17:13:15 crc kubenswrapper[4840]: I1209 17:13:15.383499 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-h879g" podStartSLOduration=7.011344832 podStartE2EDuration="14.383475841s" podCreationTimestamp="2025-12-09 17:13:01 +0000 UTC" firstStartedPulling="2025-12-09 17:13:02.352409608 +0000 UTC m=+968.343520251" lastFinishedPulling="2025-12-09 17:13:09.724540617 +0000 UTC m=+975.715651260" observedRunningTime="2025-12-09 17:13:15.378959157 +0000 UTC m=+981.370069810" watchObservedRunningTime="2025-12-09 17:13:15.383475841 +0000 UTC m=+981.374586464"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.090225 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.091176 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.094232 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-5pb8b"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.094622 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.139575 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.144436 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.186820 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrlmp\" (UniqueName: \"kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp\") pod \"openstack-operator-index-5zznw\" (UID: \"3978dbcc-87ad-4c0c-a4f4-72d18795ceac\") " pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.287974 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrlmp\" (UniqueName: \"kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp\") pod \"openstack-operator-index-5zznw\" (UID: \"3978dbcc-87ad-4c0c-a4f4-72d18795ceac\") " pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.307671 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrlmp\" (UniqueName: \"kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp\") pod \"openstack-operator-index-5zznw\" (UID: \"3978dbcc-87ad-4c0c-a4f4-72d18795ceac\") " pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.449403 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:16 crc kubenswrapper[4840]: I1209 17:13:16.909730 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:16 crc kubenswrapper[4840]: W1209 17:13:16.924304 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3978dbcc_87ad_4c0c_a4f4_72d18795ceac.slice/crio-3bc134abfe3a182360d34334b309eef8b041ca3edc6d165f4bb0fd2a352c5159 WatchSource:0}: Error finding container 3bc134abfe3a182360d34334b309eef8b041ca3edc6d165f4bb0fd2a352c5159: Status 404 returned error can't find the container with id 3bc134abfe3a182360d34334b309eef8b041ca3edc6d165f4bb0fd2a352c5159
Dec 09 17:13:17 crc kubenswrapper[4840]: I1209 17:13:17.167386 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-h879g"
Dec 09 17:13:17 crc kubenswrapper[4840]: I1209 17:13:17.272755 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-h879g"
Dec 09 17:13:17 crc kubenswrapper[4840]: I1209 17:13:17.358279 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zznw" event={"ID":"3978dbcc-87ad-4c0c-a4f4-72d18795ceac","Type":"ContainerStarted","Data":"3bc134abfe3a182360d34334b309eef8b041ca3edc6d165f4bb0fd2a352c5159"}
Dec 09 17:13:19 crc kubenswrapper[4840]: I1209 17:13:19.456895 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.077789 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6ft79"]
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.081344 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6ft79"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.094403 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6ft79"]
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.145653 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lctjq\" (UniqueName: \"kubernetes.io/projected/146acea0-42bf-4e51-b660-5577f8c2ea66-kube-api-access-lctjq\") pod \"openstack-operator-index-6ft79\" (UID: \"146acea0-42bf-4e51-b660-5577f8c2ea66\") " pod="openstack-operators/openstack-operator-index-6ft79"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.246658 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lctjq\" (UniqueName: \"kubernetes.io/projected/146acea0-42bf-4e51-b660-5577f8c2ea66-kube-api-access-lctjq\") pod \"openstack-operator-index-6ft79\" (UID: \"146acea0-42bf-4e51-b660-5577f8c2ea66\") " pod="openstack-operators/openstack-operator-index-6ft79"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.271292 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lctjq\" (UniqueName: \"kubernetes.io/projected/146acea0-42bf-4e51-b660-5577f8c2ea66-kube-api-access-lctjq\") pod \"openstack-operator-index-6ft79\" (UID: \"146acea0-42bf-4e51-b660-5577f8c2ea66\") " pod="openstack-operators/openstack-operator-index-6ft79"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.380682 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zznw" event={"ID":"3978dbcc-87ad-4c0c-a4f4-72d18795ceac","Type":"ContainerStarted","Data":"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"}
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.381042 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-5zznw" podUID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" containerName="registry-server" containerID="cri-o://8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06" gracePeriod=2
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.409584 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-5zznw" podStartSLOduration=1.6499716819999999 podStartE2EDuration="4.409566668s" podCreationTimestamp="2025-12-09 17:13:16 +0000 UTC" firstStartedPulling="2025-12-09 17:13:16.92579317 +0000 UTC m=+982.916903793" lastFinishedPulling="2025-12-09 17:13:19.685388146 +0000 UTC m=+985.676498779" observedRunningTime="2025-12-09 17:13:20.406478393 +0000 UTC m=+986.397589036" watchObservedRunningTime="2025-12-09 17:13:20.409566668 +0000 UTC m=+986.400677311"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.409746 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6ft79"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.900330 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.900700 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6ft79"]
Dec 09 17:13:20 crc kubenswrapper[4840]: W1209 17:13:20.901426 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod146acea0_42bf_4e51_b660_5577f8c2ea66.slice/crio-3a910544bcb2bd25ed892eddf793f239f9d1481e035806bdde184f574e850943 WatchSource:0}: Error finding container 3a910544bcb2bd25ed892eddf793f239f9d1481e035806bdde184f574e850943: Status 404 returned error can't find the container with id 3a910544bcb2bd25ed892eddf793f239f9d1481e035806bdde184f574e850943
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.909889 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.956268 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrlmp\" (UniqueName: \"kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp\") pod \"3978dbcc-87ad-4c0c-a4f4-72d18795ceac\" (UID: \"3978dbcc-87ad-4c0c-a4f4-72d18795ceac\") "
Dec 09 17:13:20 crc kubenswrapper[4840]: I1209 17:13:20.960004 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp" (OuterVolumeSpecName: "kube-api-access-mrlmp") pod "3978dbcc-87ad-4c0c-a4f4-72d18795ceac" (UID: "3978dbcc-87ad-4c0c-a4f4-72d18795ceac"). InnerVolumeSpecName "kube-api-access-mrlmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.058476 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrlmp\" (UniqueName: \"kubernetes.io/projected/3978dbcc-87ad-4c0c-a4f4-72d18795ceac-kube-api-access-mrlmp\") on node \"crc\" DevicePath \"\""
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.390919 4840 generic.go:334] "Generic (PLEG): container finished" podID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" containerID="8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06" exitCode=0
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.391005 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zznw" event={"ID":"3978dbcc-87ad-4c0c-a4f4-72d18795ceac","Type":"ContainerDied","Data":"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"}
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.391344 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zznw" event={"ID":"3978dbcc-87ad-4c0c-a4f4-72d18795ceac","Type":"ContainerDied","Data":"3bc134abfe3a182360d34334b309eef8b041ca3edc6d165f4bb0fd2a352c5159"}
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.391402 4840 scope.go:117] "RemoveContainer" containerID="8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.391059 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5zznw"
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.393937 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6ft79" event={"ID":"146acea0-42bf-4e51-b660-5577f8c2ea66","Type":"ContainerStarted","Data":"fc84e2e33140befb032488d9909caeb424c33eec0417567916eb789ecbbf5023"}
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.394005 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6ft79" event={"ID":"146acea0-42bf-4e51-b660-5577f8c2ea66","Type":"ContainerStarted","Data":"3a910544bcb2bd25ed892eddf793f239f9d1481e035806bdde184f574e850943"}
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.411283 4840 scope.go:117] "RemoveContainer" containerID="8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"
Dec 09 17:13:21 crc kubenswrapper[4840]: E1209 17:13:21.411880 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06\": container with ID starting with 8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06 not found: ID does not exist" containerID="8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.411937 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06"} err="failed to get container status \"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06\": rpc error: code = NotFound desc = could not find container \"8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06\": container with ID starting with 8ce2c8b3fccb9c8833953c8948f785cf5849d54433878d1c66de3133f1686b06 not found: ID does not exist"
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.413192 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6ft79" podStartSLOduration=1.3564215640000001 podStartE2EDuration="1.41317935s" podCreationTimestamp="2025-12-09 17:13:20 +0000 UTC" firstStartedPulling="2025-12-09 17:13:20.909661867 +0000 UTC m=+986.900772500" lastFinishedPulling="2025-12-09 17:13:20.966419643 +0000 UTC m=+986.957530286" observedRunningTime="2025-12-09 17:13:21.410028853 +0000 UTC m=+987.401139506" watchObservedRunningTime="2025-12-09 17:13:21.41317935 +0000 UTC m=+987.404289983"
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.439108 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:21 crc kubenswrapper[4840]: I1209 17:13:21.448647 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-5zznw"]
Dec 09 17:13:22 crc kubenswrapper[4840]: I1209 17:13:22.170159 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-h879g"
Dec 09 17:13:22 crc kubenswrapper[4840]: I1209 17:13:22.198312 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-n7s2z"
Dec 09 17:13:22 crc kubenswrapper[4840]: I1209 17:13:22.623834 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" path="/var/lib/kubelet/pods/3978dbcc-87ad-4c0c-a4f4-72d18795ceac/volumes"
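[Editor's note: the NotFound errors above are the benign tail of container removal: by the time the kubelet re-queries the status of container 8ce2c8b3... in order to delete it, CRI-O has already removed it, so the deletion goal is already met. A hypothetical Go sketch (stub types, not the kubelet's real interfaces) of why cleanup paths treat NotFound as success:]

    // Illustrative only: idempotent container removal that tolerates NotFound.
    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("rpc error: code = NotFound")

    type runtimeService struct{ containers map[string]bool }

    func (r *runtimeService) RemoveContainer(id string) error {
        if !r.containers[id] {
            return fmt.Errorf("could not find container %q: %w", id, errNotFound)
        }
        delete(r.containers, id)
        return nil
    }

    // removeIfPresent is idempotent: NotFound means the container is already
    // gone, which is exactly the desired end state, so it is not a failure.
    func removeIfPresent(r *runtimeService, id string) error {
        if err := r.RemoveContainer(id); err != nil && !errors.Is(err, errNotFound) {
            return err
        }
        return nil
    }

    func main() {
        r := &runtimeService{containers: map[string]bool{"8ce2c8b3": true}}
        fmt.Println(removeIfPresent(r, "8ce2c8b3")) // removes it -> <nil>
        fmt.Println(removeIfPresent(r, "8ce2c8b3")) // already gone -> <nil>
    }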
path="/var/lib/kubelet/pods/3978dbcc-87ad-4c0c-a4f4-72d18795ceac/volumes" Dec 09 17:13:30 crc kubenswrapper[4840]: I1209 17:13:30.411072 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6ft79" Dec 09 17:13:30 crc kubenswrapper[4840]: I1209 17:13:30.411864 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6ft79" Dec 09 17:13:30 crc kubenswrapper[4840]: I1209 17:13:30.459613 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6ft79" Dec 09 17:13:30 crc kubenswrapper[4840]: I1209 17:13:30.500182 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6ft79" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.353017 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d"] Dec 09 17:13:36 crc kubenswrapper[4840]: E1209 17:13:36.353764 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" containerName="registry-server" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.353786 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" containerName="registry-server" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.354048 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="3978dbcc-87ad-4c0c-a4f4-72d18795ceac" containerName="registry-server" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.355619 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.357960 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-hdhmm" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.363504 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d"] Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.461103 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.461175 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.461244 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzqd9\" (UniqueName: \"kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9\") pod 
\"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.562310 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzqd9\" (UniqueName: \"kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.562393 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.562436 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.562933 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.563533 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.592159 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzqd9\" (UniqueName: \"kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9\") pod \"84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:36 crc kubenswrapper[4840]: I1209 17:13:36.679809 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:37 crc kubenswrapper[4840]: I1209 17:13:37.136709 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d"] Dec 09 17:13:37 crc kubenswrapper[4840]: W1209 17:13:37.148614 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ce90f28_39dd_430b_8662_cf8d01eb6af1.slice/crio-7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43 WatchSource:0}: Error finding container 7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43: Status 404 returned error can't find the container with id 7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43 Dec 09 17:13:37 crc kubenswrapper[4840]: I1209 17:13:37.517656 4840 generic.go:334] "Generic (PLEG): container finished" podID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerID="bca0932d4c8060400482be7e9733685f4b25f412ae144e5ccb272ae32f2cc2e7" exitCode=0 Dec 09 17:13:37 crc kubenswrapper[4840]: I1209 17:13:37.517695 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" event={"ID":"8ce90f28-39dd-430b-8662-cf8d01eb6af1","Type":"ContainerDied","Data":"bca0932d4c8060400482be7e9733685f4b25f412ae144e5ccb272ae32f2cc2e7"} Dec 09 17:13:37 crc kubenswrapper[4840]: I1209 17:13:37.517720 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" event={"ID":"8ce90f28-39dd-430b-8662-cf8d01eb6af1","Type":"ContainerStarted","Data":"7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43"} Dec 09 17:13:38 crc kubenswrapper[4840]: E1209 17:13:38.120769 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ce90f28_39dd_430b_8662_cf8d01eb6af1.slice/crio-conmon-10cd1610ccc5b3ccc7d6e74f4e011c1480bbfa7aa8a6c049563cede06ad41022.scope\": RecentStats: unable to find data in memory cache]" Dec 09 17:13:38 crc kubenswrapper[4840]: I1209 17:13:38.529747 4840 generic.go:334] "Generic (PLEG): container finished" podID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerID="10cd1610ccc5b3ccc7d6e74f4e011c1480bbfa7aa8a6c049563cede06ad41022" exitCode=0 Dec 09 17:13:38 crc kubenswrapper[4840]: I1209 17:13:38.529855 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" event={"ID":"8ce90f28-39dd-430b-8662-cf8d01eb6af1","Type":"ContainerDied","Data":"10cd1610ccc5b3ccc7d6e74f4e011c1480bbfa7aa8a6c049563cede06ad41022"} Dec 09 17:13:39 crc kubenswrapper[4840]: I1209 17:13:39.539900 4840 generic.go:334] "Generic (PLEG): container finished" podID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerID="0af59263c825bf992b82c5a9f789d27d139065977d1f20fa8059980108ff284b" exitCode=0 Dec 09 17:13:39 crc kubenswrapper[4840]: I1209 17:13:39.540037 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" event={"ID":"8ce90f28-39dd-430b-8662-cf8d01eb6af1","Type":"ContainerDied","Data":"0af59263c825bf992b82c5a9f789d27d139065977d1f20fa8059980108ff284b"} Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.876425 4840 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.931145 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzqd9\" (UniqueName: \"kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9\") pod \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.931220 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util\") pod \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.931250 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle\") pod \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\" (UID: \"8ce90f28-39dd-430b-8662-cf8d01eb6af1\") " Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.932708 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle" (OuterVolumeSpecName: "bundle") pod "8ce90f28-39dd-430b-8662-cf8d01eb6af1" (UID: "8ce90f28-39dd-430b-8662-cf8d01eb6af1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.938303 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9" (OuterVolumeSpecName: "kube-api-access-jzqd9") pod "8ce90f28-39dd-430b-8662-cf8d01eb6af1" (UID: "8ce90f28-39dd-430b-8662-cf8d01eb6af1"). InnerVolumeSpecName "kube-api-access-jzqd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:13:40 crc kubenswrapper[4840]: I1209 17:13:40.962762 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util" (OuterVolumeSpecName: "util") pod "8ce90f28-39dd-430b-8662-cf8d01eb6af1" (UID: "8ce90f28-39dd-430b-8662-cf8d01eb6af1"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.032703 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzqd9\" (UniqueName: \"kubernetes.io/projected/8ce90f28-39dd-430b-8662-cf8d01eb6af1-kube-api-access-jzqd9\") on node \"crc\" DevicePath \"\"" Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.032746 4840 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-util\") on node \"crc\" DevicePath \"\"" Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.032762 4840 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8ce90f28-39dd-430b-8662-cf8d01eb6af1-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.572048 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" event={"ID":"8ce90f28-39dd-430b-8662-cf8d01eb6af1","Type":"ContainerDied","Data":"7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43"} Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.572103 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c6f720843a12089ae97f1de4e22981e3eb15f7d64ecfab9ea4bc303abb91b43" Dec 09 17:13:41 crc kubenswrapper[4840]: I1209 17:13:41.572301 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.881969 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"] Dec 09 17:13:43 crc kubenswrapper[4840]: E1209 17:13:43.882652 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="pull" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.882668 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="pull" Dec 09 17:13:43 crc kubenswrapper[4840]: E1209 17:13:43.882677 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="util" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.882684 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="util" Dec 09 17:13:43 crc kubenswrapper[4840]: E1209 17:13:43.882711 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="extract" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.882719 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="extract" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.882856 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ce90f28-39dd-430b-8662-cf8d01eb6af1" containerName="extract" Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.883404 4840 util.go:30] "No sandbox for pod can be found. 
Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.886383 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-9kkhx"
Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.914703 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"]
Dec 09 17:13:43 crc kubenswrapper[4840]: I1209 17:13:43.971025 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xpnn\" (UniqueName: \"kubernetes.io/projected/b2677e8e-1651-477c-b3ae-b08cc1ab0e6c-kube-api-access-6xpnn\") pod \"openstack-operator-controller-operator-5f5557f974-mq8t5\" (UID: \"b2677e8e-1651-477c-b3ae-b08cc1ab0e6c\") " pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:13:44 crc kubenswrapper[4840]: I1209 17:13:44.072282 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xpnn\" (UniqueName: \"kubernetes.io/projected/b2677e8e-1651-477c-b3ae-b08cc1ab0e6c-kube-api-access-6xpnn\") pod \"openstack-operator-controller-operator-5f5557f974-mq8t5\" (UID: \"b2677e8e-1651-477c-b3ae-b08cc1ab0e6c\") " pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:13:44 crc kubenswrapper[4840]: I1209 17:13:44.093705 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xpnn\" (UniqueName: \"kubernetes.io/projected/b2677e8e-1651-477c-b3ae-b08cc1ab0e6c-kube-api-access-6xpnn\") pod \"openstack-operator-controller-operator-5f5557f974-mq8t5\" (UID: \"b2677e8e-1651-477c-b3ae-b08cc1ab0e6c\") " pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:13:44 crc kubenswrapper[4840]: I1209 17:13:44.204184 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:13:44 crc kubenswrapper[4840]: I1209 17:13:44.702287 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"]
Dec 09 17:13:44 crc kubenswrapper[4840]: W1209 17:13:44.710766 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2677e8e_1651_477c_b3ae_b08cc1ab0e6c.slice/crio-da929fef1207174ae7d36388f16a15454a0bea849deb61e336c60478fb54f282 WatchSource:0}: Error finding container da929fef1207174ae7d36388f16a15454a0bea849deb61e336c60478fb54f282: Status 404 returned error can't find the container with id da929fef1207174ae7d36388f16a15454a0bea849deb61e336c60478fb54f282
Dec 09 17:13:45 crc kubenswrapper[4840]: I1209 17:13:45.598212 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5" event={"ID":"b2677e8e-1651-477c-b3ae-b08cc1ab0e6c","Type":"ContainerStarted","Data":"da929fef1207174ae7d36388f16a15454a0bea849deb61e336c60478fb54f282"}
Dec 09 17:13:49 crc kubenswrapper[4840]: I1209 17:13:49.626340 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5" event={"ID":"b2677e8e-1651-477c-b3ae-b08cc1ab0e6c","Type":"ContainerStarted","Data":"5e6e942953353ff30e45029e219a7d1f458bfb3e86f5131798d8c17426eac28c"}
Dec 09 17:13:49 crc kubenswrapper[4840]: I1209 17:13:49.626920 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:13:49 crc kubenswrapper[4840]: I1209 17:13:49.683941 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5" podStartSLOduration=2.280035426 podStartE2EDuration="6.683918096s" podCreationTimestamp="2025-12-09 17:13:43 +0000 UTC" firstStartedPulling="2025-12-09 17:13:44.726037539 +0000 UTC m=+1010.717148172" lastFinishedPulling="2025-12-09 17:13:49.129920219 +0000 UTC m=+1015.121030842" observedRunningTime="2025-12-09 17:13:49.680236115 +0000 UTC m=+1015.671346758" watchObservedRunningTime="2025-12-09 17:13:49.683918096 +0000 UTC m=+1015.675028729"
Dec 09 17:13:54 crc kubenswrapper[4840]: I1209 17:13:54.207887 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5f5557f974-mq8t5"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.177538 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.179516 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.181652 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-79v5z"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.183361 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.184247 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.191490 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-jh8cs"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.192719 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.193603 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.195316 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-k6kpf"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.218785 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.218919 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf98w\" (UniqueName: \"kubernetes.io/projected/63ca78d6-7a48-4fcf-bac3-7215c2ca3282-kube-api-access-nf98w\") pod \"designate-operator-controller-manager-697fb699cf-4w9cf\" (UID: \"63ca78d6-7a48-4fcf-bac3-7215c2ca3282\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.218992 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgrjs\" (UniqueName: \"kubernetes.io/projected/3bf967ce-abdb-4d63-a262-861d238218e9-kube-api-access-pgrjs\") pod \"barbican-operator-controller-manager-7d9dfd778-g495k\" (UID: \"3bf967ce-abdb-4d63-a262-861d238218e9\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.219081 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnll2\" (UniqueName: \"kubernetes.io/projected/4637e86f-9342-431b-8bea-80027b740c6a-kube-api-access-wnll2\") pod \"cinder-operator-controller-manager-6c677c69b-v88tx\" (UID: \"4637e86f-9342-431b-8bea-80027b740c6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.237534 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.239093 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.240986 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-g7gs4"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.244102 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.256801 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.257936 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.264873 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-s96z2"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.268773 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.274152 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.294437 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.295722 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.298586 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-7grc8"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.311247 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322011 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnll2\" (UniqueName: \"kubernetes.io/projected/4637e86f-9342-431b-8bea-80027b740c6a-kube-api-access-wnll2\") pod \"cinder-operator-controller-manager-6c677c69b-v88tx\" (UID: \"4637e86f-9342-431b-8bea-80027b740c6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322110 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-955pv\" (UniqueName: \"kubernetes.io/projected/a9b07484-f3d4-441d-8390-03d86f2ffe1f-kube-api-access-955pv\") pod \"horizon-operator-controller-manager-68c6d99b8f-h9lh4\" (UID: \"a9b07484-f3d4-441d-8390-03d86f2ffe1f\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322155 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf98w\" (UniqueName: \"kubernetes.io/projected/63ca78d6-7a48-4fcf-bac3-7215c2ca3282-kube-api-access-nf98w\") pod \"designate-operator-controller-manager-697fb699cf-4w9cf\" (UID: \"63ca78d6-7a48-4fcf-bac3-7215c2ca3282\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322185 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbxcv\" (UniqueName: \"kubernetes.io/projected/09bdc1d3-b19f-4f25-b28a-e4e100108d48-kube-api-access-wbxcv\") pod \"glance-operator-controller-manager-5697bb5779-dr2p5\" (UID: \"09bdc1d3-b19f-4f25-b28a-e4e100108d48\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322213 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgrjs\" (UniqueName: \"kubernetes.io/projected/3bf967ce-abdb-4d63-a262-861d238218e9-kube-api-access-pgrjs\") pod \"barbican-operator-controller-manager-7d9dfd778-g495k\" (UID: \"3bf967ce-abdb-4d63-a262-861d238218e9\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.322239 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfplq\" (UniqueName: \"kubernetes.io/projected/1e390e20-35af-4e6b-87cf-7cdd9fa55898-kube-api-access-pfplq\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpd2s\" (UID: \"1e390e20-35af-4e6b-87cf-7cdd9fa55898\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.329022 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.330392 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.336097 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.337377 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.339294 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-k72gp"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.339473 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.353334 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-mhbt8"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.354834 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.378604 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.379939 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.384054 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnll2\" (UniqueName: \"kubernetes.io/projected/4637e86f-9342-431b-8bea-80027b740c6a-kube-api-access-wnll2\") pod \"cinder-operator-controller-manager-6c677c69b-v88tx\" (UID: \"4637e86f-9342-431b-8bea-80027b740c6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.390305 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgrjs\" (UniqueName: \"kubernetes.io/projected/3bf967ce-abdb-4d63-a262-861d238218e9-kube-api-access-pgrjs\") pod \"barbican-operator-controller-manager-7d9dfd778-g495k\" (UID: \"3bf967ce-abdb-4d63-a262-861d238218e9\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.390445 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.390822 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-k4qgg"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.403882 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf98w\" (UniqueName: \"kubernetes.io/projected/63ca78d6-7a48-4fcf-bac3-7215c2ca3282-kube-api-access-nf98w\") pod \"designate-operator-controller-manager-697fb699cf-4w9cf\" (UID: \"63ca78d6-7a48-4fcf-bac3-7215c2ca3282\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.410532 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.425544 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-955pv\" (UniqueName: \"kubernetes.io/projected/a9b07484-f3d4-441d-8390-03d86f2ffe1f-kube-api-access-955pv\") pod \"horizon-operator-controller-manager-68c6d99b8f-h9lh4\" (UID: \"a9b07484-f3d4-441d-8390-03d86f2ffe1f\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.425586 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbxcv\" (UniqueName: \"kubernetes.io/projected/09bdc1d3-b19f-4f25-b28a-e4e100108d48-kube-api-access-wbxcv\") pod \"glance-operator-controller-manager-5697bb5779-dr2p5\" (UID: \"09bdc1d3-b19f-4f25-b28a-e4e100108d48\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.425611 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfplq\" (UniqueName: \"kubernetes.io/projected/1e390e20-35af-4e6b-87cf-7cdd9fa55898-kube-api-access-pfplq\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpd2s\" (UID: \"1e390e20-35af-4e6b-87cf-7cdd9fa55898\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.457845 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-955pv\" (UniqueName: \"kubernetes.io/projected/a9b07484-f3d4-441d-8390-03d86f2ffe1f-kube-api-access-955pv\") pod \"horizon-operator-controller-manager-68c6d99b8f-h9lh4\" (UID: \"a9b07484-f3d4-441d-8390-03d86f2ffe1f\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.457927 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.472629 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz"]
Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.473596 4840 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.476623 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbxcv\" (UniqueName: \"kubernetes.io/projected/09bdc1d3-b19f-4f25-b28a-e4e100108d48-kube-api-access-wbxcv\") pod \"glance-operator-controller-manager-5697bb5779-dr2p5\" (UID: \"09bdc1d3-b19f-4f25-b28a-e4e100108d48\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.483572 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfplq\" (UniqueName: \"kubernetes.io/projected/1e390e20-35af-4e6b-87cf-7cdd9fa55898-kube-api-access-pfplq\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpd2s\" (UID: \"1e390e20-35af-4e6b-87cf-7cdd9fa55898\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.487343 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-j6jp7" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.506024 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.506360 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.521418 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.526786 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.549423 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8djx\" (UniqueName: \"kubernetes.io/projected/ce821a6e-c155-4d30-aa89-f56d2348821d-kube-api-access-j8djx\") pod \"ironic-operator-controller-manager-967d97867-9mmdv\" (UID: \"ce821a6e-c155-4d30-aa89-f56d2348821d\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.549680 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-827vh\" (UniqueName: \"kubernetes.io/projected/1a555877-a028-46dd-bcf4-0202493c00b2-kube-api-access-827vh\") pod \"manila-operator-controller-manager-5b5fd79c9c-wb8sz\" (UID: \"1a555877-a028-46dd-bcf4-0202493c00b2\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.550091 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcn2m\" (UniqueName: \"kubernetes.io/projected/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-kube-api-access-jcn2m\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.550293 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.550415 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.550425 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zglvr\" (UniqueName: \"kubernetes.io/projected/6a6eb330-3cbb-44cd-aced-d66e6f3554e6-kube-api-access-zglvr\") pod \"keystone-operator-controller-manager-7765d96ddf-2sjnt\" (UID: \"6a6eb330-3cbb-44cd-aced-d66e6f3554e6\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.550556 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.557180 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-n8hgw" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.558249 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.565069 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.594393 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.602273 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.602361 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.619148 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.619273 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rns4h" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652375 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-827vh\" (UniqueName: \"kubernetes.io/projected/1a555877-a028-46dd-bcf4-0202493c00b2-kube-api-access-827vh\") pod \"manila-operator-controller-manager-5b5fd79c9c-wb8sz\" (UID: \"1a555877-a028-46dd-bcf4-0202493c00b2\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652430 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29m5q\" (UniqueName: \"kubernetes.io/projected/a40ea926-2932-47de-89e0-1b7db3b1c6e9-kube-api-access-29m5q\") pod \"nova-operator-controller-manager-697bc559fc-r5zgb\" (UID: \"a40ea926-2932-47de-89e0-1b7db3b1c6e9\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652457 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcn2m\" (UniqueName: \"kubernetes.io/projected/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-kube-api-access-jcn2m\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsk2h\" (UniqueName: \"kubernetes.io/projected/8e1c1649-f8e1-4044-8c36-f4cfb12a929b-kube-api-access-bsk2h\") pod \"mariadb-operator-controller-manager-79c8c4686c-cc2w8\" (UID: \"8e1c1649-f8e1-4044-8c36-f4cfb12a929b\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652542 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") 
" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652612 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zglvr\" (UniqueName: \"kubernetes.io/projected/6a6eb330-3cbb-44cd-aced-d66e6f3554e6-kube-api-access-zglvr\") pod \"keystone-operator-controller-manager-7765d96ddf-2sjnt\" (UID: \"6a6eb330-3cbb-44cd-aced-d66e6f3554e6\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.652656 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8djx\" (UniqueName: \"kubernetes.io/projected/ce821a6e-c155-4d30-aa89-f56d2348821d-kube-api-access-j8djx\") pod \"ironic-operator-controller-manager-967d97867-9mmdv\" (UID: \"ce821a6e-c155-4d30-aa89-f56d2348821d\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:14 crc kubenswrapper[4840]: E1209 17:14:14.653225 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:14 crc kubenswrapper[4840]: E1209 17:14:14.653320 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:15.153300544 +0000 UTC m=+1041.144411207 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.659726 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.668608 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.671533 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-g4kms" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.689016 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8djx\" (UniqueName: \"kubernetes.io/projected/ce821a6e-c155-4d30-aa89-f56d2348821d-kube-api-access-j8djx\") pod \"ironic-operator-controller-manager-967d97867-9mmdv\" (UID: \"ce821a6e-c155-4d30-aa89-f56d2348821d\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.689160 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.690427 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zglvr\" (UniqueName: \"kubernetes.io/projected/6a6eb330-3cbb-44cd-aced-d66e6f3554e6-kube-api-access-zglvr\") pod \"keystone-operator-controller-manager-7765d96ddf-2sjnt\" (UID: \"6a6eb330-3cbb-44cd-aced-d66e6f3554e6\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.691829 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-827vh\" (UniqueName: \"kubernetes.io/projected/1a555877-a028-46dd-bcf4-0202493c00b2-kube-api-access-827vh\") pod \"manila-operator-controller-manager-5b5fd79c9c-wb8sz\" (UID: \"1a555877-a028-46dd-bcf4-0202493c00b2\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.693777 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcn2m\" (UniqueName: \"kubernetes.io/projected/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-kube-api-access-jcn2m\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.699609 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.710849 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vdldq"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.711991 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.721246 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-lqj2j" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.726606 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vdldq"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.749854 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.754033 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29m5q\" (UniqueName: \"kubernetes.io/projected/a40ea926-2932-47de-89e0-1b7db3b1c6e9-kube-api-access-29m5q\") pod \"nova-operator-controller-manager-697bc559fc-r5zgb\" (UID: \"a40ea926-2932-47de-89e0-1b7db3b1c6e9\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.754086 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzvhw\" (UniqueName: \"kubernetes.io/projected/cdf3df3c-7d67-4096-95e2-779d5e413c46-kube-api-access-wzvhw\") pod \"octavia-operator-controller-manager-998648c74-vdldq\" (UID: \"cdf3df3c-7d67-4096-95e2-779d5e413c46\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.754108 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwn86\" (UniqueName: \"kubernetes.io/projected/2ec42045-f3cc-4418-8744-d6397ec73843-kube-api-access-jwn86\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mpgv7\" (UID: \"2ec42045-f3cc-4418-8744-d6397ec73843\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.754159 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsk2h\" (UniqueName: \"kubernetes.io/projected/8e1c1649-f8e1-4044-8c36-f4cfb12a929b-kube-api-access-bsk2h\") pod \"mariadb-operator-controller-manager-79c8c4686c-cc2w8\" (UID: \"8e1c1649-f8e1-4044-8c36-f4cfb12a929b\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.770141 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.773330 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-q5v56"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.775092 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.785598 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.788742 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-jc8qt" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.790544 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-pdgbd" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.795089 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-q5v56"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.804758 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29m5q\" (UniqueName: \"kubernetes.io/projected/a40ea926-2932-47de-89e0-1b7db3b1c6e9-kube-api-access-29m5q\") pod \"nova-operator-controller-manager-697bc559fc-r5zgb\" (UID: \"a40ea926-2932-47de-89e0-1b7db3b1c6e9\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.804953 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsk2h\" (UniqueName: \"kubernetes.io/projected/8e1c1649-f8e1-4044-8c36-f4cfb12a929b-kube-api-access-bsk2h\") pod \"mariadb-operator-controller-manager-79c8c4686c-cc2w8\" (UID: \"8e1c1649-f8e1-4044-8c36-f4cfb12a929b\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.803884 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.827790 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.829886 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.833089 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.833444 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.834422 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.834735 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-w929d" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.834809 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.834832 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.834890 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.843898 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.847946 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-z2whj" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.849467 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-bd2cq" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.865684 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xtdf\" (UniqueName: \"kubernetes.io/projected/fc539d46-da16-4f0b-8303-81fc7c35303b-kube-api-access-2xtdf\") pod \"swift-operator-controller-manager-9d58d64bc-x2hg6\" (UID: \"fc539d46-da16-4f0b-8303-81fc7c35303b\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.869102 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzvhw\" (UniqueName: \"kubernetes.io/projected/cdf3df3c-7d67-4096-95e2-779d5e413c46-kube-api-access-wzvhw\") pod \"octavia-operator-controller-manager-998648c74-vdldq\" (UID: \"cdf3df3c-7d67-4096-95e2-779d5e413c46\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.869141 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwn86\" (UniqueName: \"kubernetes.io/projected/2ec42045-f3cc-4418-8744-d6397ec73843-kube-api-access-jwn86\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mpgv7\" (UID: \"2ec42045-f3cc-4418-8744-d6397ec73843\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.869173 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.869293 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlpbx\" (UniqueName: \"kubernetes.io/projected/1f1441e7-48a5-433b-a3b7-882a3582ac88-kube-api-access-rlpbx\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.866484 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.870901 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.871025 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.878383 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-nsw7w" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.897858 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.905833 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.919340 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.922279 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzvhw\" (UniqueName: \"kubernetes.io/projected/cdf3df3c-7d67-4096-95e2-779d5e413c46-kube-api-access-wzvhw\") pod \"octavia-operator-controller-manager-998648c74-vdldq\" (UID: \"cdf3df3c-7d67-4096-95e2-779d5e413c46\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.923232 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-fdfln" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.929375 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.939712 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz"] Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.942279 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.943643 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwn86\" (UniqueName: \"kubernetes.io/projected/2ec42045-f3cc-4418-8744-d6397ec73843-kube-api-access-jwn86\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mpgv7\" (UID: \"2ec42045-f3cc-4418-8744-d6397ec73843\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971373 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xtdf\" (UniqueName: \"kubernetes.io/projected/fc539d46-da16-4f0b-8303-81fc7c35303b-kube-api-access-2xtdf\") pod \"swift-operator-controller-manager-9d58d64bc-x2hg6\" (UID: \"fc539d46-da16-4f0b-8303-81fc7c35303b\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971461 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj7v2\" (UniqueName: \"kubernetes.io/projected/a70c50dc-fee0-4c02-8ae7-6e41429292ef-kube-api-access-kj7v2\") pod \"ovn-operator-controller-manager-b6456fdb6-67x7g\" (UID: \"a70c50dc-fee0-4c02-8ae7-6e41429292ef\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971500 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9h48\" (UniqueName: \"kubernetes.io/projected/57253bfe-39c8-4ad9-99b4-b475a492083e-kube-api-access-g9h48\") pod \"telemetry-operator-controller-manager-796785f986-7mv2k\" (UID: \"57253bfe-39c8-4ad9-99b4-b475a492083e\") " pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971565 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971650 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlpbx\" (UniqueName: \"kubernetes.io/projected/1f1441e7-48a5-433b-a3b7-882a3582ac88-kube-api-access-rlpbx\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:14 crc kubenswrapper[4840]: E1209 17:14:14.971763 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971779 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rmpl\" (UniqueName: \"kubernetes.io/projected/fb5216a9-c43c-4eb4-ba33-affa2a72dbc4-kube-api-access-6rmpl\") pod \"watcher-operator-controller-manager-667bd8d554-xc9jz\" (UID: \"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4\") " 
pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:14 crc kubenswrapper[4840]: E1209 17:14:14.971824 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:15.471808325 +0000 UTC m=+1041.462918958 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971858 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfmgq\" (UniqueName: \"kubernetes.io/projected/298ab5a4-fb7d-42e4-8278-3972993456aa-kube-api-access-mfmgq\") pod \"placement-operator-controller-manager-78f8948974-q5v56\" (UID: \"298ab5a4-fb7d-42e4-8278-3972993456aa\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.971927 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhljd\" (UniqueName: \"kubernetes.io/projected/adbbc8e9-2553-4096-89a3-133ba5a752b6-kube-api-access-fhljd\") pod \"test-operator-controller-manager-5854674fcc-4kqr2\" (UID: \"adbbc8e9-2553-4096-89a3-133ba5a752b6\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.980878 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:14 crc kubenswrapper[4840]: I1209 17:14:14.989711 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.004929 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xtdf\" (UniqueName: \"kubernetes.io/projected/fc539d46-da16-4f0b-8303-81fc7c35303b-kube-api-access-2xtdf\") pod \"swift-operator-controller-manager-9d58d64bc-x2hg6\" (UID: \"fc539d46-da16-4f0b-8303-81fc7c35303b\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.007037 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.010425 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh"] Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.011449 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.012794 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlpbx\" (UniqueName: \"kubernetes.io/projected/1f1441e7-48a5-433b-a3b7-882a3582ac88-kube-api-access-rlpbx\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.013111 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.015146 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh"] Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.016655 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-2p2fw" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.017189 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.051250 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.069062 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz"] Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.077700 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078367 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l29mx\" (UniqueName: \"kubernetes.io/projected/6a1d9f43-3cd7-4480-a4f2-e88b82d972ab-kube-api-access-l29mx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6xndz\" (UID: \"6a1d9f43-3cd7-4480-a4f2-e88b82d972ab\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078444 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rmpl\" (UniqueName: \"kubernetes.io/projected/fb5216a9-c43c-4eb4-ba33-affa2a72dbc4-kube-api-access-6rmpl\") pod \"watcher-operator-controller-manager-667bd8d554-xc9jz\" (UID: \"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078474 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078504 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfmgq\" (UniqueName: \"kubernetes.io/projected/298ab5a4-fb7d-42e4-8278-3972993456aa-kube-api-access-mfmgq\") pod \"placement-operator-controller-manager-78f8948974-q5v56\" (UID: \"298ab5a4-fb7d-42e4-8278-3972993456aa\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078531 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhljd\" (UniqueName: \"kubernetes.io/projected/adbbc8e9-2553-4096-89a3-133ba5a752b6-kube-api-access-fhljd\") pod \"test-operator-controller-manager-5854674fcc-4kqr2\" (UID: \"adbbc8e9-2553-4096-89a3-133ba5a752b6\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078573 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj7v2\" (UniqueName: \"kubernetes.io/projected/a70c50dc-fee0-4c02-8ae7-6e41429292ef-kube-api-access-kj7v2\") pod \"ovn-operator-controller-manager-b6456fdb6-67x7g\" (UID: \"a70c50dc-fee0-4c02-8ae7-6e41429292ef\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078595 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9h48\" (UniqueName: \"kubernetes.io/projected/57253bfe-39c8-4ad9-99b4-b475a492083e-kube-api-access-g9h48\") pod \"telemetry-operator-controller-manager-796785f986-7mv2k\" (UID: \"57253bfe-39c8-4ad9-99b4-b475a492083e\") " pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078629 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.078683 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbbtr\" (UniqueName: \"kubernetes.io/projected/a0c4ab40-b641-4154-a607-dfe342057b15-kube-api-access-sbbtr\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.083675 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz"] Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.094187 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-qwmxl" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.140951 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhljd\" (UniqueName: \"kubernetes.io/projected/adbbc8e9-2553-4096-89a3-133ba5a752b6-kube-api-access-fhljd\") pod \"test-operator-controller-manager-5854674fcc-4kqr2\" (UID: \"adbbc8e9-2553-4096-89a3-133ba5a752b6\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.145820 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rmpl\" (UniqueName: \"kubernetes.io/projected/fb5216a9-c43c-4eb4-ba33-affa2a72dbc4-kube-api-access-6rmpl\") pod \"watcher-operator-controller-manager-667bd8d554-xc9jz\" (UID: \"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.147674 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9h48\" (UniqueName: \"kubernetes.io/projected/57253bfe-39c8-4ad9-99b4-b475a492083e-kube-api-access-g9h48\") pod \"telemetry-operator-controller-manager-796785f986-7mv2k\" (UID: \"57253bfe-39c8-4ad9-99b4-b475a492083e\") " pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.179962 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l29mx\" (UniqueName: \"kubernetes.io/projected/6a1d9f43-3cd7-4480-a4f2-e88b82d972ab-kube-api-access-l29mx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6xndz\" (UID: \"6a1d9f43-3cd7-4480-a4f2-e88b82d972ab\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.180077 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.180169 4840 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.180216 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbbtr\" (UniqueName: \"kubernetes.io/projected/a0c4ab40-b641-4154-a607-dfe342057b15-kube-api-access-sbbtr\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.180264 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180454 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180465 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180509 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:16.180489066 +0000 UTC m=+1042.171599699 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180528 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:15.680519577 +0000 UTC m=+1041.671630210 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180591 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.180619 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:15.680611059 +0000 UTC m=+1041.671721692 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.182083 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj7v2\" (UniqueName: \"kubernetes.io/projected/a70c50dc-fee0-4c02-8ae7-6e41429292ef-kube-api-access-kj7v2\") pod \"ovn-operator-controller-manager-b6456fdb6-67x7g\" (UID: \"a70c50dc-fee0-4c02-8ae7-6e41429292ef\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.208537 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfmgq\" (UniqueName: \"kubernetes.io/projected/298ab5a4-fb7d-42e4-8278-3972993456aa-kube-api-access-mfmgq\") pod \"placement-operator-controller-manager-78f8948974-q5v56\" (UID: \"298ab5a4-fb7d-42e4-8278-3972993456aa\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.219762 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbbtr\" (UniqueName: \"kubernetes.io/projected/a0c4ab40-b641-4154-a607-dfe342057b15-kube-api-access-sbbtr\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.226801 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l29mx\" (UniqueName: \"kubernetes.io/projected/6a1d9f43-3cd7-4480-a4f2-e88b82d972ab-kube-api-access-l29mx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6xndz\" (UID: \"6a1d9f43-3cd7-4480-a4f2-e88b82d972ab\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.237128 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.310574 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.366785 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.407297 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.429451 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.487671 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.487822 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.487873 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:16.487860181 +0000 UTC m=+1042.478970814 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.659847 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.694919 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.695034 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.695162 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.695209 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:16.695193804 +0000 UTC m=+1042.686304437 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.695282 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: E1209 17:14:15.695353 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:16.695332078 +0000 UTC m=+1042.686442741 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.704227 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.915155 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx"] Dec 09 17:14:15 crc kubenswrapper[4840]: I1209 17:14:15.927582 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k"] Dec 09 17:14:15 crc kubenswrapper[4840]: W1209 17:14:15.938002 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3bf967ce_abdb_4d63_a262_861d238218e9.slice/crio-b4821223ac892d74e76edc096bb50b779b46d21812527bd99653f73344905a76 WatchSource:0}: Error finding container b4821223ac892d74e76edc096bb50b779b46d21812527bd99653f73344905a76: Status 404 returned error can't find the container with id b4821223ac892d74e76edc096bb50b779b46d21812527bd99653f73344905a76 Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.065318 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.082042 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.084618 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4"] Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.098005 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a6eb330_3cbb_44cd_aced_d66e6f3554e6.slice/crio-c76f7fa51661dffe160392a9063e23bc60def1836cf1f505214b7a36519c1aaa WatchSource:0}: Error finding container c76f7fa51661dffe160392a9063e23bc60def1836cf1f505214b7a36519c1aaa: Status 404 returned error can't find the container with id c76f7fa51661dffe160392a9063e23bc60def1836cf1f505214b7a36519c1aaa Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.099902 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf"] Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.101086 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63ca78d6_7a48_4fcf_bac3_7215c2ca3282.slice/crio-1bb2b46d2105d585f651580e4136850be41ef9e35765c07c14160f816f3bb68f WatchSource:0}: Error finding container 1bb2b46d2105d585f651580e4136850be41ef9e35765c07c14160f816f3bb68f: Status 404 returned error can't find the container with id 1bb2b46d2105d585f651580e4136850be41ef9e35765c07c14160f816f3bb68f Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.108209 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.116962 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.200935 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.201127 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.201203 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:18.201185015 +0000 UTC m=+1044.192295648 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.265776 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.272135 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vdldq"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.285022 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.297420 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k"] Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.306755 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.23:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g9h48,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-796785f986-7mv2k_openstack-operators(57253bfe-39c8-4ad9-99b4-b475a492083e): 
ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.308150 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a555877_a028_46dd_bcf4_0202493c00b2.slice/crio-678eb38d0ca3c74a3ffd886d2971d9f46ad356bf7af9ae5ff3e3d66682029ce0 WatchSource:0}: Error finding container 678eb38d0ca3c74a3ffd886d2971d9f46ad356bf7af9ae5ff3e3d66682029ce0: Status 404 returned error can't find the container with id 678eb38d0ca3c74a3ffd886d2971d9f46ad356bf7af9ae5ff3e3d66682029ce0 Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.308949 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g9h48,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-796785f986-7mv2k_openstack-operators(57253bfe-39c8-4ad9-99b4-b475a492083e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.309875 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz"] Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.310214 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" podUID="57253bfe-39c8-4ad9-99b4-b475a492083e" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.314205 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-827vh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-wb8sz_openstack-operators(1a555877-a028-46dd-bcf4-0202493c00b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.321343 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-827vh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-wb8sz_openstack-operators(1a555877-a028-46dd-bcf4-0202493c00b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.323432 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" podUID="1a555877-a028-46dd-bcf4-0202493c00b2" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.457676 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.477531 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.477572 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.481132 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.485705 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-q5v56"] Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.493376 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz"] Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.503740 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod298ab5a4_fb7d_42e4_8278_3972993456aa.slice/crio-704f1f79d5319d662bbce6a61a3cf5a7fce745157655df2122b8aba8bda61bf0 WatchSource:0}: Error finding container 704f1f79d5319d662bbce6a61a3cf5a7fce745157655df2122b8aba8bda61bf0: Status 404 returned error can't find the container with id 704f1f79d5319d662bbce6a61a3cf5a7fce745157655df2122b8aba8bda61bf0 Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.506997 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: 
\"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.507188 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.507240 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:18.507223314 +0000 UTC m=+1044.498333947 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.507664 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a1d9f43_3cd7_4480_a4f2_e88b82d972ab.slice/crio-a529118e04794dd99e5af64e665d68cf6d552821ff7e81b048a869bf7c485e1f WatchSource:0}: Error finding container a529118e04794dd99e5af64e665d68cf6d552821ff7e81b048a869bf7c485e1f: Status 404 returned error can't find the container with id a529118e04794dd99e5af64e665d68cf6d552821ff7e81b048a869bf7c485e1f Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.507845 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mfmgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-q5v56_openstack-operators(298ab5a4-fb7d-42e4-8278-3972993456aa): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.509386 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g"] Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.511143 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mfmgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-q5v56_openstack-operators(298ab5a4-fb7d-42e4-8278-3972993456aa): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.512294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" podUID="298ab5a4-fb7d-42e4-8278-3972993456aa" Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.514517 4840 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadbbc8e9_2553_4096_89a3_133ba5a752b6.slice/crio-a4b07acf948f53db65c13c90c8a57ac928fc2f7088adf649049627b79cfff064 WatchSource:0}: Error finding container a4b07acf948f53db65c13c90c8a57ac928fc2f7088adf649049627b79cfff064: Status 404 returned error can't find the container with id a4b07acf948f53db65c13c90c8a57ac928fc2f7088adf649049627b79cfff064 Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.514678 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l29mx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-6xndz_openstack-operators(6a1d9f43-3cd7-4480-a4f2-e88b82d972ab): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.517953 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" podUID="6a1d9f43-3cd7-4480-a4f2-e88b82d972ab" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.519660 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fhljd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-4kqr2_openstack-operators(adbbc8e9-2553-4096-89a3-133ba5a752b6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.521793 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fhljd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-4kqr2_openstack-operators(adbbc8e9-2553-4096-89a3-133ba5a752b6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.523097 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" podUID="adbbc8e9-2553-4096-89a3-133ba5a752b6" Dec 09 17:14:16 crc kubenswrapper[4840]: W1209 17:14:16.523256 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda40ea926_2932_47de_89e0_1b7db3b1c6e9.slice/crio-959050464cad3fd109f3d226257dfcf5bc9445621dff398d2e672838c9b6c661 WatchSource:0}: Error finding container 959050464cad3fd109f3d226257dfcf5bc9445621dff398d2e672838c9b6c661: Status 404 returned error can't find the container with id 959050464cad3fd109f3d226257dfcf5bc9445621dff398d2e672838c9b6c661 Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.523473 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6rmpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-xc9jz_openstack-operators(fb5216a9-c43c-4eb4-ba33-affa2a72dbc4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.539673 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kj7v2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-67x7g_openstack-operators(a70c50dc-fee0-4c02-8ae7-6e41429292ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.539820 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-29m5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-r5zgb_openstack-operators(a40ea926-2932-47de-89e0-1b7db3b1c6e9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: 
E1209 17:14:16.550138 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kj7v2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-67x7g_openstack-operators(a70c50dc-fee0-4c02-8ae7-6e41429292ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.550282 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-29m5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-r5zgb_openstack-operators(a40ea926-2932-47de-89e0-1b7db3b1c6e9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.554059 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" podUID="a40ea926-2932-47de-89e0-1b7db3b1c6e9" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.554120 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" podUID="a70c50dc-fee0-4c02-8ae7-6e41429292ef" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.709152 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.709335 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.709402 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:18.709385766 +0000 UTC m=+1044.700496399 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.709349 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.709440 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.709486 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:18.709473248 +0000 UTC m=+1044.700583881 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.870665 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" event={"ID":"1a555877-a028-46dd-bcf4-0202493c00b2","Type":"ContainerStarted","Data":"678eb38d0ca3c74a3ffd886d2971d9f46ad356bf7af9ae5ff3e3d66682029ce0"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.871941 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" event={"ID":"6a6eb330-3cbb-44cd-aced-d66e6f3554e6","Type":"ContainerStarted","Data":"c76f7fa51661dffe160392a9063e23bc60def1836cf1f505214b7a36519c1aaa"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.873852 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" podUID="1a555877-a028-46dd-bcf4-0202493c00b2" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.873852 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" event={"ID":"fc539d46-da16-4f0b-8303-81fc7c35303b","Type":"ContainerStarted","Data":"f74bb60b637f98b26ec2ac1c1fb1c95ab2ecb6503f76949c536882bfd4fb8d8e"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.877328 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" event={"ID":"2ec42045-f3cc-4418-8744-d6397ec73843","Type":"ContainerStarted","Data":"ef6efc88b72ccdf8c5d4c5a852b5a8093f7b95829c9e063d24907d6fa5e679d7"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.879133 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" event={"ID":"a9b07484-f3d4-441d-8390-03d86f2ffe1f","Type":"ContainerStarted","Data":"592548b6b585b73dec03151734fa3ff59f7e353ed6532c26f6490bd04a28d02a"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.880693 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" event={"ID":"3bf967ce-abdb-4d63-a262-861d238218e9","Type":"ContainerStarted","Data":"b4821223ac892d74e76edc096bb50b779b46d21812527bd99653f73344905a76"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.886225 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" event={"ID":"6a1d9f43-3cd7-4480-a4f2-e88b82d972ab","Type":"ContainerStarted","Data":"a529118e04794dd99e5af64e665d68cf6d552821ff7e81b048a869bf7c485e1f"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.891106 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" podUID="6a1d9f43-3cd7-4480-a4f2-e88b82d972ab" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.894878 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" event={"ID":"ce821a6e-c155-4d30-aa89-f56d2348821d","Type":"ContainerStarted","Data":"a756295ddb8cfcc6fe3f9f02493cfaf17630143a7ca4a62a4e8b1273126c80d7"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.897569 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" event={"ID":"57253bfe-39c8-4ad9-99b4-b475a492083e","Type":"ContainerStarted","Data":"a20cb69f22e7a34fabd6fb9cc29e63b4ca4574a9dbcb4d574da5b08ad8a2a35c"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.901365 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.23:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" podUID="57253bfe-39c8-4ad9-99b4-b475a492083e" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.902135 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" event={"ID":"8e1c1649-f8e1-4044-8c36-f4cfb12a929b","Type":"ContainerStarted","Data":"1142e9d019db438e27f988e6f4e58582852fe323023d0aaba81c8bc529a11f45"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.909862 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" event={"ID":"cdf3df3c-7d67-4096-95e2-779d5e413c46","Type":"ContainerStarted","Data":"ec6788e207e9c19f5101345afc03ca29377e6278f470c85f1659ade462530931"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.918005 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" event={"ID":"63ca78d6-7a48-4fcf-bac3-7215c2ca3282","Type":"ContainerStarted","Data":"1bb2b46d2105d585f651580e4136850be41ef9e35765c07c14160f816f3bb68f"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.920102 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" event={"ID":"09bdc1d3-b19f-4f25-b28a-e4e100108d48","Type":"ContainerStarted","Data":"57e7b4625ae8fb43d02171ae5fb55c46c3d2d7a6570f3d5cc4bc13bb59edc1b2"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.921205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" event={"ID":"298ab5a4-fb7d-42e4-8278-3972993456aa","Type":"ContainerStarted","Data":"704f1f79d5319d662bbce6a61a3cf5a7fce745157655df2122b8aba8bda61bf0"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.926254 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" podUID="298ab5a4-fb7d-42e4-8278-3972993456aa" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.929872 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" event={"ID":"1e390e20-35af-4e6b-87cf-7cdd9fa55898","Type":"ContainerStarted","Data":"3dc52aef131cddcbf6617156e772b1cf9fc45a51edad62659b306962b8b97ff5"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.934455 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" event={"ID":"a40ea926-2932-47de-89e0-1b7db3b1c6e9","Type":"ContainerStarted","Data":"959050464cad3fd109f3d226257dfcf5bc9445621dff398d2e672838c9b6c661"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.936296 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" event={"ID":"adbbc8e9-2553-4096-89a3-133ba5a752b6","Type":"ContainerStarted","Data":"a4b07acf948f53db65c13c90c8a57ac928fc2f7088adf649049627b79cfff064"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.938060 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" podUID="a40ea926-2932-47de-89e0-1b7db3b1c6e9" Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.938202 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" podUID="adbbc8e9-2553-4096-89a3-133ba5a752b6" Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.940112 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" event={"ID":"4637e86f-9342-431b-8bea-80027b740c6a","Type":"ContainerStarted","Data":"7dada6ffe4a5250eb8156ea06a72d6ae7986bc370be4b61de3e6108f43baedf7"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.942429 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" event={"ID":"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4","Type":"ContainerStarted","Data":"3b52efc46c9220d099ed68ff7473068af83c4e0c8fc012091454832dca91b7d1"} Dec 09 17:14:16 crc kubenswrapper[4840]: I1209 17:14:16.952685 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" event={"ID":"a70c50dc-fee0-4c02-8ae7-6e41429292ef","Type":"ContainerStarted","Data":"5e340468a615225d5976b67f9e223067527dad435fb76d6466382a5a69b9d46e"} Dec 09 17:14:16 crc kubenswrapper[4840]: E1209 17:14:16.955758 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" podUID="a70c50dc-fee0-4c02-8ae7-6e41429292ef" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.980462 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" podUID="6a1d9f43-3cd7-4480-a4f2-e88b82d972ab" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.981893 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" podUID="a70c50dc-fee0-4c02-8ae7-6e41429292ef" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.981945 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" podUID="298ab5a4-fb7d-42e4-8278-3972993456aa" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.983454 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.23:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" podUID="57253bfe-39c8-4ad9-99b4-b475a492083e" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.983519 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" podUID="1a555877-a028-46dd-bcf4-0202493c00b2" Dec 09 17:14:17 crc kubenswrapper[4840]: E1209 17:14:17.983560 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" podUID="adbbc8e9-2553-4096-89a3-133ba5a752b6" Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.005698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" podUID="a40ea926-2932-47de-89e0-1b7db3b1c6e9" Dec 09 17:14:18 crc kubenswrapper[4840]: I1209 17:14:18.235313 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.235324 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.236410 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:22.236390605 +0000 UTC m=+1048.227501238 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: I1209 17:14:18.539425 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.539579 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.539655 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:22.539638008 +0000 UTC m=+1048.530748641 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: I1209 17:14:18.742440 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:18 crc kubenswrapper[4840]: I1209 17:14:18.742547 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.743911 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.744011 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:22.74398847 +0000 UTC m=+1048.735099103 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.744217 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:18 crc kubenswrapper[4840]: E1209 17:14:18.744456 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:22.744444272 +0000 UTC m=+1048.735554905 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: I1209 17:14:22.299012 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.299232 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.299567 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:30.299541135 +0000 UTC m=+1056.290651818 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: I1209 17:14:22.603861 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.604090 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.604173 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:30.604151236 +0000 UTC m=+1056.595261859 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: I1209 17:14:22.805660 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.805773 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: I1209 17:14:22.806109 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.806149 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:30.806131402 +0000 UTC m=+1056.797242035 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.806241 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:22 crc kubenswrapper[4840]: E1209 17:14:22.806310 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:30.806291177 +0000 UTC m=+1056.797401910 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.432335 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.433088 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6rmpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-xc9jz_openstack-operators(fb5216a9-c43c-4eb4-ba33-affa2a72dbc4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.434249 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" podUID="fb5216a9-c43c-4eb4-ba33-affa2a72dbc4" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.932252 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-955pv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-h9lh4_openstack-operators(a9b07484-f3d4-441d-8390-03d86f2ffe1f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.932475 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pfplq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-jpd2s_openstack-operators(1e390e20-35af-4e6b-87cf-7cdd9fa55898): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.934124 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" podUID="1e390e20-35af-4e6b-87cf-7cdd9fa55898" Dec 09 17:14:29 crc kubenswrapper[4840]: E1209 17:14:29.934149 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" podUID="a9b07484-f3d4-441d-8390-03d86f2ffe1f" Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.074376 4840 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" event={"ID":"63ca78d6-7a48-4fcf-bac3-7215c2ca3282","Type":"ContainerStarted","Data":"f43baa0658f15e32cb0238f9f63ce50feb8f7dec358ac53e9af3006733e4cb28"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.076642 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" event={"ID":"ce821a6e-c155-4d30-aa89-f56d2348821d","Type":"ContainerStarted","Data":"f28c42a973558cc9292c784c6da05ac7a02872957d32f7fdf7fa3ea76ed829d0"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.086688 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" event={"ID":"1e390e20-35af-4e6b-87cf-7cdd9fa55898","Type":"ContainerStarted","Data":"be9f7211b69955e3461710d652e67978eb5ee23ed61e6fdee7ca65ecf6fda25f"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.086798 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.088015 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" podUID="1e390e20-35af-4e6b-87cf-7cdd9fa55898" Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.089957 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" event={"ID":"a9b07484-f3d4-441d-8390-03d86f2ffe1f","Type":"ContainerStarted","Data":"f4078fca97287d07437182209dbb57aa00363a72ba7f59b63a473d935b5370fc"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.090261 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.095484 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" podUID="a9b07484-f3d4-441d-8390-03d86f2ffe1f" Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.099033 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" event={"ID":"3bf967ce-abdb-4d63-a262-861d238218e9","Type":"ContainerStarted","Data":"dbb403b9a89464fa540f836c9f499ce6a44d5e365cf582e6bfd4715f4a925266"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.100610 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" event={"ID":"8e1c1649-f8e1-4044-8c36-f4cfb12a929b","Type":"ContainerStarted","Data":"03768f5bf1f327196a8fab8c3af6d1782095f4095542317091cb507651c64f75"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.104238 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" 
event={"ID":"6a6eb330-3cbb-44cd-aced-d66e6f3554e6","Type":"ContainerStarted","Data":"b2e59ac74753eade9432de41dab1ae76eca1e545218a4d0a1f0f7918a6a5ad56"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.108869 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" event={"ID":"09bdc1d3-b19f-4f25-b28a-e4e100108d48","Type":"ContainerStarted","Data":"46a8fe0c13b6f130829fee8a8b775bab9262483d8b10e1e70cc4c9cfcf96cea2"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.110891 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" event={"ID":"4637e86f-9342-431b-8bea-80027b740c6a","Type":"ContainerStarted","Data":"c40965b46c9a35196dcc40e54d13af1fdeddd087c78ef5ef6ca75b960678f5d8"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.112605 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" event={"ID":"fc539d46-da16-4f0b-8303-81fc7c35303b","Type":"ContainerStarted","Data":"7c99862095dcbce6cddde893928a44450c219b981343ecf6469745d63b1ae51a"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.115931 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" event={"ID":"2ec42045-f3cc-4418-8744-d6397ec73843","Type":"ContainerStarted","Data":"d1759a0f887fb3644b5ae275aa6d52decd76b6945a8ed9ceed7e08a4a7fd09b6"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.118752 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" event={"ID":"cdf3df3c-7d67-4096-95e2-779d5e413c46","Type":"ContainerStarted","Data":"bfae3741313ee84d6a10c966a8c6f620329f5053f08296a36a90b5e57b3bb1fe"} Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.323721 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.323908 4840 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.323950 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert podName:cf5c5d51-0dfb-414d-9f08-3c9be6400df5 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:46.323937182 +0000 UTC m=+1072.315047815 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert") pod "infra-operator-controller-manager-78d48bff9d-zgj4l" (UID: "cf5c5d51-0dfb-414d-9f08-3c9be6400df5") : secret "infra-operator-webhook-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.633662 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.633873 4840 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.638323 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert podName:1f1441e7-48a5-433b-a3b7-882a3582ac88 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:46.638294669 +0000 UTC m=+1072.629405292 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fmcphk" (UID: "1f1441e7-48a5-433b-a3b7-882a3582ac88") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.840941 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:30 crc kubenswrapper[4840]: I1209 17:14:30.841063 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.841206 4840 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.841255 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:46.841242802 +0000 UTC m=+1072.832353435 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "metrics-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.841297 4840 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 17:14:30 crc kubenswrapper[4840]: E1209 17:14:30.841316 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs podName:a0c4ab40-b641-4154-a607-dfe342057b15 nodeName:}" failed. No retries permitted until 2025-12-09 17:14:46.841309954 +0000 UTC m=+1072.832420587 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs") pod "openstack-operator-controller-manager-866b78c4d6-gnpwh" (UID: "a0c4ab40-b641-4154-a607-dfe342057b15") : secret "webhook-server-cert" not found Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.128535 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" event={"ID":"ce821a6e-c155-4d30-aa89-f56d2348821d","Type":"ContainerStarted","Data":"2a5b62591f906afd8175bd4207c010fffab8752201c913b391d9e3877f7333cc"} Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.129429 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.136550 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" event={"ID":"63ca78d6-7a48-4fcf-bac3-7215c2ca3282","Type":"ContainerStarted","Data":"3ff3d927e9ecac13c061d2f6f85e6d4018696dedc712ebe48a04b55211828267"} Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.137183 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.141798 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" event={"ID":"6a6eb330-3cbb-44cd-aced-d66e6f3554e6","Type":"ContainerStarted","Data":"a9a24c97b7c73683ec0f1457fcad515a32074200987b12965203e622dd9a03eb"} Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.141846 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.153210 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" podStartSLOduration=3.200988892 podStartE2EDuration="17.153195924s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.495148103 +0000 UTC m=+1042.486258736" lastFinishedPulling="2025-12-09 17:14:30.447355135 +0000 UTC m=+1056.438465768" observedRunningTime="2025-12-09 17:14:31.152363491 +0000 UTC m=+1057.143474124" watchObservedRunningTime="2025-12-09 17:14:31.153195924 +0000 UTC m=+1057.144306557" Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.181946 
4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" podStartSLOduration=2.918794426 podStartE2EDuration="17.181932522s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.100476164 +0000 UTC m=+1042.091586797" lastFinishedPulling="2025-12-09 17:14:30.36361426 +0000 UTC m=+1056.354724893" observedRunningTime="2025-12-09 17:14:31.179086114 +0000 UTC m=+1057.170196747" watchObservedRunningTime="2025-12-09 17:14:31.181932522 +0000 UTC m=+1057.173043155" Dec 09 17:14:31 crc kubenswrapper[4840]: I1209 17:14:31.222514 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" podStartSLOduration=2.967548003 podStartE2EDuration="17.222495604s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.103317002 +0000 UTC m=+1042.094427635" lastFinishedPulling="2025-12-09 17:14:30.358264603 +0000 UTC m=+1056.349375236" observedRunningTime="2025-12-09 17:14:31.217079805 +0000 UTC m=+1057.208190438" watchObservedRunningTime="2025-12-09 17:14:31.222495604 +0000 UTC m=+1057.213606237" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.152101 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" event={"ID":"1e390e20-35af-4e6b-87cf-7cdd9fa55898","Type":"ContainerStarted","Data":"9bcebccaae9802da4c132c4f76adc9847ae0b05d631b5b58e0f64aa5c92d5dc1"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.157088 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" event={"ID":"09bdc1d3-b19f-4f25-b28a-e4e100108d48","Type":"ContainerStarted","Data":"c95b674c335a470640143222be944c4e392361c268972a0dc329a6c9bc3d1c95"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.157199 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.159413 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" event={"ID":"4637e86f-9342-431b-8bea-80027b740c6a","Type":"ContainerStarted","Data":"881aa318eb467bf4f7f5113db816500fe1a59de813e43536d130dd606b30a66e"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.159526 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.161564 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" event={"ID":"a9b07484-f3d4-441d-8390-03d86f2ffe1f","Type":"ContainerStarted","Data":"4fe0e0aebb37efd012d03a0e1f5da7e670fdf362912dd01a6409c30f6ab8a2be"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.163836 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" event={"ID":"3bf967ce-abdb-4d63-a262-861d238218e9","Type":"ContainerStarted","Data":"1f132643bb8f1d76181f6b1ef2f209fa5e04e5ac93a7fb399244ef1f970cb148"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.163900 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.169885 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" event={"ID":"8e1c1649-f8e1-4044-8c36-f4cfb12a929b","Type":"ContainerStarted","Data":"3accc1a066945b61dd723df36fa27eb1ac617be6eaf7f7d84f7839a9224b27e6"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.170006 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.182289 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" podStartSLOduration=5.292048731 podStartE2EDuration="18.182269063s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.106388616 +0000 UTC m=+1042.097499249" lastFinishedPulling="2025-12-09 17:14:28.996608948 +0000 UTC m=+1054.987719581" observedRunningTime="2025-12-09 17:14:32.167230991 +0000 UTC m=+1058.158341624" watchObservedRunningTime="2025-12-09 17:14:32.182269063 +0000 UTC m=+1058.173379696" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.195283 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" podStartSLOduration=3.483135296 podStartE2EDuration="18.19526544s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.083009845 +0000 UTC m=+1042.074120478" lastFinishedPulling="2025-12-09 17:14:30.795139989 +0000 UTC m=+1056.786250622" observedRunningTime="2025-12-09 17:14:32.19200206 +0000 UTC m=+1058.183112693" watchObservedRunningTime="2025-12-09 17:14:32.19526544 +0000 UTC m=+1058.186376083" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.211495 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" event={"ID":"cdf3df3c-7d67-4096-95e2-779d5e413c46","Type":"ContainerStarted","Data":"72edd06e700e8ba82271efaf8e5e33fbce9d32ecbf834d04e179cbe1095c95d7"} Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.211546 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.222806 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" podStartSLOduration=3.398218038 podStartE2EDuration="18.222789324s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:15.930308839 +0000 UTC m=+1041.921419472" lastFinishedPulling="2025-12-09 17:14:30.754880135 +0000 UTC m=+1056.745990758" observedRunningTime="2025-12-09 17:14:32.221247262 +0000 UTC m=+1058.212357905" watchObservedRunningTime="2025-12-09 17:14:32.222789324 +0000 UTC m=+1058.213899957" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.257016 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" podStartSLOduration=3.537227319 podStartE2EDuration="18.256995162s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 
17:14:15.94128024 +0000 UTC m=+1041.932390863" lastFinishedPulling="2025-12-09 17:14:30.661048073 +0000 UTC m=+1056.652158706" observedRunningTime="2025-12-09 17:14:32.251325347 +0000 UTC m=+1058.242435980" watchObservedRunningTime="2025-12-09 17:14:32.256995162 +0000 UTC m=+1058.248105795" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.270979 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" podStartSLOduration=5.372881298 podStartE2EDuration="18.270948334s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.100756052 +0000 UTC m=+1042.091866685" lastFinishedPulling="2025-12-09 17:14:28.998823088 +0000 UTC m=+1054.989933721" observedRunningTime="2025-12-09 17:14:32.26422151 +0000 UTC m=+1058.255332143" watchObservedRunningTime="2025-12-09 17:14:32.270948334 +0000 UTC m=+1058.262058967" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.288728 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" podStartSLOduration=3.766033871 podStartE2EDuration="18.288712521s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.272725506 +0000 UTC m=+1042.263836139" lastFinishedPulling="2025-12-09 17:14:30.795404156 +0000 UTC m=+1056.786514789" observedRunningTime="2025-12-09 17:14:32.284213448 +0000 UTC m=+1058.275324081" watchObservedRunningTime="2025-12-09 17:14:32.288712521 +0000 UTC m=+1058.279823154" Dec 09 17:14:32 crc kubenswrapper[4840]: I1209 17:14:32.310408 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" podStartSLOduration=3.891461619 podStartE2EDuration="18.310385045s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.272989323 +0000 UTC m=+1042.264099956" lastFinishedPulling="2025-12-09 17:14:30.691912749 +0000 UTC m=+1056.683023382" observedRunningTime="2025-12-09 17:14:32.304842764 +0000 UTC m=+1058.295953407" watchObservedRunningTime="2025-12-09 17:14:32.310385045 +0000 UTC m=+1058.301495688" Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.219795 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" event={"ID":"fc539d46-da16-4f0b-8303-81fc7c35303b","Type":"ContainerStarted","Data":"7f798a2c0075e645fb146e87fba52a3d46ec06c1500fbac8c9ef4177ca8ae7e4"} Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.220132 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.223864 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" event={"ID":"2ec42045-f3cc-4418-8744-d6397ec73843","Type":"ContainerStarted","Data":"0c2c8fa8a41e4c4eae21920cd5b0316c5f54dba8c4661ea89cdb7cc04a3c6025"} Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.223891 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.253867 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" podStartSLOduration=3.143889237 podStartE2EDuration="19.253846698s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.300745584 +0000 UTC m=+1042.291856217" lastFinishedPulling="2025-12-09 17:14:32.410703045 +0000 UTC m=+1058.401813678" observedRunningTime="2025-12-09 17:14:33.238300142 +0000 UTC m=+1059.229410775" watchObservedRunningTime="2025-12-09 17:14:33.253846698 +0000 UTC m=+1059.244957331" Dec 09 17:14:33 crc kubenswrapper[4840]: I1209 17:14:33.261246 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" podStartSLOduration=3.000439844 podStartE2EDuration="19.26122351s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.091402895 +0000 UTC m=+1042.082513528" lastFinishedPulling="2025-12-09 17:14:32.352186561 +0000 UTC m=+1058.343297194" observedRunningTime="2025-12-09 17:14:33.251461163 +0000 UTC m=+1059.242571806" watchObservedRunningTime="2025-12-09 17:14:33.26122351 +0000 UTC m=+1059.252334143" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.036395 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.036450 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.235230 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-x2hg6" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.235481 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mpgv7" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.512241 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-g495k" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.530577 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-v88tx" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.541090 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4w9cf" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.563001 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-dr2p5" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.599213 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpd2s" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.627367 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-h9lh4" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.901362 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-9mmdv" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.934625 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-2sjnt" Dec 09 17:14:34 crc kubenswrapper[4840]: I1209 17:14:34.987138 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-cc2w8" Dec 09 17:14:35 crc kubenswrapper[4840]: I1209 17:14:35.054488 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vdldq" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.293642 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" event={"ID":"298ab5a4-fb7d-42e4-8278-3972993456aa","Type":"ContainerStarted","Data":"87f38ad10076d0a54a448351104e521e2a99d6e2544a148ebf17aefd5109ee3a"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.294063 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" event={"ID":"298ab5a4-fb7d-42e4-8278-3972993456aa","Type":"ContainerStarted","Data":"b2a22ead4dfa2f71956cb95ac66438b6367e4a6da006fa58ff97f05734e9b5c4"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.294851 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.297002 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" event={"ID":"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4","Type":"ContainerStarted","Data":"c5b8de0d72acea4c26bbd170fb1a54f31830c11128c75f905e954a6e5d6d0011"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.297026 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" event={"ID":"fb5216a9-c43c-4eb4-ba33-affa2a72dbc4","Type":"ContainerStarted","Data":"c4713f184deb249ac863afa25ebab1526ffd442d942a89301ea994cca898e98d"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.297457 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.299081 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" event={"ID":"6a1d9f43-3cd7-4480-a4f2-e88b82d972ab","Type":"ContainerStarted","Data":"ca4ccc0d0e56163b84d33233696bbdeca9a91994919705ef691a3cffaf80dc34"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.300819 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" event={"ID":"a70c50dc-fee0-4c02-8ae7-6e41429292ef","Type":"ContainerStarted","Data":"9e7c9c00f665f135175d94a536fcdf54eb085d4e23ef0ea9b6584623af3aa394"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.300843 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" event={"ID":"a70c50dc-fee0-4c02-8ae7-6e41429292ef","Type":"ContainerStarted","Data":"ffac8f9673304e93fa5a2b778101ed424eef4dedfd743d74ca75f64c14ce5653"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.301174 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.302541 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" event={"ID":"a40ea926-2932-47de-89e0-1b7db3b1c6e9","Type":"ContainerStarted","Data":"2ca615f7167b7fb18b23ea8edbc0103a99ca16319c612f9243403811c3b2a705"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.302563 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" event={"ID":"a40ea926-2932-47de-89e0-1b7db3b1c6e9","Type":"ContainerStarted","Data":"f85274b3008e395c28019e1460bcf9a7771ca58c42547d4a50aced5c752900f5"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.302888 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.306060 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" event={"ID":"1a555877-a028-46dd-bcf4-0202493c00b2","Type":"ContainerStarted","Data":"f2c614a7a699e1f0c16ee75ddda06a28f2462a6b89826c9cc1d095cb7e1bcc98"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.306086 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" event={"ID":"1a555877-a028-46dd-bcf4-0202493c00b2","Type":"ContainerStarted","Data":"fc9f23e092e894ce9ea81c989d83c443fd874497a21c641676ad2b7e7c8008c9"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.306484 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.307816 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" event={"ID":"adbbc8e9-2553-4096-89a3-133ba5a752b6","Type":"ContainerStarted","Data":"ba4afd6151c7df3aafe9bcbbd8c9b4e04b87fe4f89b5b4718ccd63ad81ea816c"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.307838 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" event={"ID":"adbbc8e9-2553-4096-89a3-133ba5a752b6","Type":"ContainerStarted","Data":"2e32667af70b60aff08143c4443c7a1057c6b5e43d53294a99e42e7969d188a5"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.308180 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.309461 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" event={"ID":"57253bfe-39c8-4ad9-99b4-b475a492083e","Type":"ContainerStarted","Data":"1864833b104497193ca4f83837be0d6cff85f42006854014a0eb0ad5dbf6ab3d"} Dec 09 17:14:41 
crc kubenswrapper[4840]: I1209 17:14:41.309485 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" event={"ID":"57253bfe-39c8-4ad9-99b4-b475a492083e","Type":"ContainerStarted","Data":"24cdc7136d413c9cf02a6fe273a90cac43a81e69859e0e16622e842a42f8bfc3"} Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.309799 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.345288 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" podStartSLOduration=3.585768148 podStartE2EDuration="27.345271842s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.507719057 +0000 UTC m=+1042.498829690" lastFinishedPulling="2025-12-09 17:14:40.267222751 +0000 UTC m=+1066.258333384" observedRunningTime="2025-12-09 17:14:41.319495516 +0000 UTC m=+1067.310606149" watchObservedRunningTime="2025-12-09 17:14:41.345271842 +0000 UTC m=+1067.336382475" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.346079 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6xndz" podStartSLOduration=3.548378064 podStartE2EDuration="27.346073384s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.514509944 +0000 UTC m=+1042.505620577" lastFinishedPulling="2025-12-09 17:14:40.312205264 +0000 UTC m=+1066.303315897" observedRunningTime="2025-12-09 17:14:41.340314056 +0000 UTC m=+1067.331424689" watchObservedRunningTime="2025-12-09 17:14:41.346073384 +0000 UTC m=+1067.337184017" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.357656 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" podStartSLOduration=3.564149475 podStartE2EDuration="27.357637751s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.539454337 +0000 UTC m=+1042.530564970" lastFinishedPulling="2025-12-09 17:14:40.332942613 +0000 UTC m=+1066.324053246" observedRunningTime="2025-12-09 17:14:41.355741099 +0000 UTC m=+1067.346851732" watchObservedRunningTime="2025-12-09 17:14:41.357637751 +0000 UTC m=+1067.348748384" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.378792 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" podStartSLOduration=3.608324167 podStartE2EDuration="27.378773091s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.519547662 +0000 UTC m=+1042.510658295" lastFinishedPulling="2025-12-09 17:14:40.289996586 +0000 UTC m=+1066.281107219" observedRunningTime="2025-12-09 17:14:41.375397958 +0000 UTC m=+1067.366508601" watchObservedRunningTime="2025-12-09 17:14:41.378773091 +0000 UTC m=+1067.369883724" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.398865 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" podStartSLOduration=3.4391512 podStartE2EDuration="27.398845831s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 
17:14:16.306573554 +0000 UTC m=+1042.297684187" lastFinishedPulling="2025-12-09 17:14:40.266268185 +0000 UTC m=+1066.257378818" observedRunningTime="2025-12-09 17:14:41.392745424 +0000 UTC m=+1067.383856057" watchObservedRunningTime="2025-12-09 17:14:41.398845831 +0000 UTC m=+1067.389956464" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.441719 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" podStartSLOduration=3.670628485 podStartE2EDuration="27.441702636s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.523367386 +0000 UTC m=+1042.514478019" lastFinishedPulling="2025-12-09 17:14:40.294441537 +0000 UTC m=+1066.285552170" observedRunningTime="2025-12-09 17:14:41.437369827 +0000 UTC m=+1067.428480460" watchObservedRunningTime="2025-12-09 17:14:41.441702636 +0000 UTC m=+1067.432813269" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.444162 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" podStartSLOduration=3.758666479 podStartE2EDuration="27.444153723s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.539754496 +0000 UTC m=+1042.530865129" lastFinishedPulling="2025-12-09 17:14:40.22524174 +0000 UTC m=+1066.216352373" observedRunningTime="2025-12-09 17:14:41.417119622 +0000 UTC m=+1067.408230255" watchObservedRunningTime="2025-12-09 17:14:41.444153723 +0000 UTC m=+1067.435264356" Dec 09 17:14:41 crc kubenswrapper[4840]: I1209 17:14:41.451997 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" podStartSLOduration=3.443616862 podStartE2EDuration="27.451980417s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:16.31409668 +0000 UTC m=+1042.305207313" lastFinishedPulling="2025-12-09 17:14:40.322460195 +0000 UTC m=+1066.313570868" observedRunningTime="2025-12-09 17:14:41.448847732 +0000 UTC m=+1067.439958365" watchObservedRunningTime="2025-12-09 17:14:41.451980417 +0000 UTC m=+1067.443091050" Dec 09 17:14:45 crc kubenswrapper[4840]: I1209 17:14:45.314331 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-796785f986-7mv2k" Dec 09 17:14:45 crc kubenswrapper[4840]: I1209 17:14:45.370550 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4kqr2" Dec 09 17:14:45 crc kubenswrapper[4840]: I1209 17:14:45.411284 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-67x7g" Dec 09 17:14:45 crc kubenswrapper[4840]: I1209 17:14:45.440846 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-q5v56" Dec 09 17:14:45 crc kubenswrapper[4840]: I1209 17:14:45.664317 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-xc9jz" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.399853 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.407055 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cf5c5d51-0dfb-414d-9f08-3c9be6400df5-cert\") pod \"infra-operator-controller-manager-78d48bff9d-zgj4l\" (UID: \"cf5c5d51-0dfb-414d-9f08-3c9be6400df5\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.633021 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.705115 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.710023 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f1441e7-48a5-433b-a3b7-882a3582ac88-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fmcphk\" (UID: \"1f1441e7-48a5-433b-a3b7-882a3582ac88\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.845522 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.908273 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.908415 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.914824 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-metrics-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:46 crc kubenswrapper[4840]: I1209 17:14:46.915988 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a0c4ab40-b641-4154-a607-dfe342057b15-webhook-certs\") pod \"openstack-operator-controller-manager-866b78c4d6-gnpwh\" (UID: \"a0c4ab40-b641-4154-a607-dfe342057b15\") " pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:47 crc kubenswrapper[4840]: W1209 17:14:47.093850 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf5c5d51_0dfb_414d_9f08_3c9be6400df5.slice/crio-931f1dbabcea2efcb44df44be1b231927cf44f3975300727c89b29fce2bb9018 WatchSource:0}: Error finding container 931f1dbabcea2efcb44df44be1b231927cf44f3975300727c89b29fce2bb9018: Status 404 returned error can't find the container with id 931f1dbabcea2efcb44df44be1b231927cf44f3975300727c89b29fce2bb9018 Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.094270 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l"] Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.186987 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.249491 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk"] Dec 09 17:14:47 crc kubenswrapper[4840]: W1209 17:14:47.267251 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f1441e7_48a5_433b_a3b7_882a3582ac88.slice/crio-370a6c161a1f27af88706a01a3b09fe166ebca1b66ad8bb4bef084f6a5b09732 WatchSource:0}: Error finding container 370a6c161a1f27af88706a01a3b09fe166ebca1b66ad8bb4bef084f6a5b09732: Status 404 returned error can't find the container with id 370a6c161a1f27af88706a01a3b09fe166ebca1b66ad8bb4bef084f6a5b09732 Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.357006 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" event={"ID":"1f1441e7-48a5-433b-a3b7-882a3582ac88","Type":"ContainerStarted","Data":"370a6c161a1f27af88706a01a3b09fe166ebca1b66ad8bb4bef084f6a5b09732"} Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.358922 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" event={"ID":"cf5c5d51-0dfb-414d-9f08-3c9be6400df5","Type":"ContainerStarted","Data":"931f1dbabcea2efcb44df44be1b231927cf44f3975300727c89b29fce2bb9018"} Dec 09 17:14:47 crc kubenswrapper[4840]: I1209 17:14:47.600768 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh"] Dec 09 17:14:47 crc kubenswrapper[4840]: W1209 17:14:47.603851 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0c4ab40_b641_4154_a607_dfe342057b15.slice/crio-88750c324034939cbf933110dbb922bafcc0f5756723d2f45a2ef25903b681f8 WatchSource:0}: Error finding container 88750c324034939cbf933110dbb922bafcc0f5756723d2f45a2ef25903b681f8: Status 404 returned error can't find the container with id 88750c324034939cbf933110dbb922bafcc0f5756723d2f45a2ef25903b681f8 Dec 09 17:14:48 crc kubenswrapper[4840]: I1209 17:14:48.372910 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" event={"ID":"a0c4ab40-b641-4154-a607-dfe342057b15","Type":"ContainerStarted","Data":"88750c324034939cbf933110dbb922bafcc0f5756723d2f45a2ef25903b681f8"} Dec 09 17:14:54 crc kubenswrapper[4840]: I1209 17:14:54.432318 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" event={"ID":"a0c4ab40-b641-4154-a607-dfe342057b15","Type":"ContainerStarted","Data":"8d16ab437c310f193173cf77bd8cd0ba5384ef3d898159f9820a4ee840b56c94"} Dec 09 17:14:54 crc kubenswrapper[4840]: I1209 17:14:54.432759 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:14:54 crc kubenswrapper[4840]: I1209 17:14:54.458233 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" podStartSLOduration=40.458212489 podStartE2EDuration="40.458212489s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:14:54.453742786 +0000 UTC m=+1080.444853429" watchObservedRunningTime="2025-12-09 17:14:54.458212489 +0000 UTC m=+1080.449323142" Dec 09 17:14:54 crc kubenswrapper[4840]: I1209 17:14:54.946366 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-wb8sz" Dec 09 17:14:54 crc kubenswrapper[4840]: I1209 17:14:54.995587 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-r5zgb" Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.450039 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" event={"ID":"cf5c5d51-0dfb-414d-9f08-3c9be6400df5","Type":"ContainerStarted","Data":"d27cc299b2da205192080501874e520c46575f08cb6caa08a30ee22ccfe8af70"} Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.450437 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" event={"ID":"cf5c5d51-0dfb-414d-9f08-3c9be6400df5","Type":"ContainerStarted","Data":"e97607ba07d728b29b4934b4498cb71fe79a2abcc4d44a66bef1b4927584263f"} Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.450476 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.455076 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" event={"ID":"1f1441e7-48a5-433b-a3b7-882a3582ac88","Type":"ContainerStarted","Data":"6a6a008c4fd1cf99f3d5f8ae6fa3a9443a80052d346b90d3db9d29b45fc556e0"} Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.455124 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" event={"ID":"1f1441e7-48a5-433b-a3b7-882a3582ac88","Type":"ContainerStarted","Data":"bb1597dc33de68cb88f35353daff85c2e64a4e7f2d86ea7bb4de53aba3632833"} Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.455513 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.482841 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" podStartSLOduration=34.266379986 podStartE2EDuration="42.482802447s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" firstStartedPulling="2025-12-09 17:14:47.09587929 +0000 UTC m=+1073.086989923" lastFinishedPulling="2025-12-09 17:14:55.312301741 +0000 UTC m=+1081.303412384" observedRunningTime="2025-12-09 17:14:56.46576728 +0000 UTC m=+1082.456877973" watchObservedRunningTime="2025-12-09 17:14:56.482802447 +0000 UTC m=+1082.473913120" Dec 09 17:14:56 crc kubenswrapper[4840]: I1209 17:14:56.502743 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" podStartSLOduration=34.456291093 podStartE2EDuration="42.502720493s" podCreationTimestamp="2025-12-09 17:14:14 +0000 UTC" 
firstStartedPulling="2025-12-09 17:14:47.270460176 +0000 UTC m=+1073.261570819" lastFinishedPulling="2025-12-09 17:14:55.316889586 +0000 UTC m=+1081.308000219" observedRunningTime="2025-12-09 17:14:56.499126215 +0000 UTC m=+1082.490236848" watchObservedRunningTime="2025-12-09 17:14:56.502720493 +0000 UTC m=+1082.493831126" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.185556 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l"] Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.186737 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.190054 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.190720 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.198123 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l"] Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.356311 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.356375 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.356785 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r6tq\" (UniqueName: \"kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.458406 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r6tq\" (UniqueName: \"kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.458505 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: 
I1209 17:15:00.458544 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.459465 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.466862 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.477521 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r6tq\" (UniqueName: \"kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq\") pod \"collect-profiles-29421675-4lw2l\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.558989 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:00 crc kubenswrapper[4840]: I1209 17:15:00.969233 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l"] Dec 09 17:15:01 crc kubenswrapper[4840]: I1209 17:15:01.513551 4840 generic.go:334] "Generic (PLEG): container finished" podID="576e7f6a-1593-456a-8530-2fe9909fa1b3" containerID="314b1bb38798c7ca9d98f134281cc7a931655fab16e93ca327c42011739fc918" exitCode=0 Dec 09 17:15:01 crc kubenswrapper[4840]: I1209 17:15:01.513647 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" event={"ID":"576e7f6a-1593-456a-8530-2fe9909fa1b3","Type":"ContainerDied","Data":"314b1bb38798c7ca9d98f134281cc7a931655fab16e93ca327c42011739fc918"} Dec 09 17:15:01 crc kubenswrapper[4840]: I1209 17:15:01.513931 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" event={"ID":"576e7f6a-1593-456a-8530-2fe9909fa1b3","Type":"ContainerStarted","Data":"f4dcfb35904bab2da979881a02ef5d1fa604ae661bbd62bc130f81e34b9cf619"} Dec 09 17:15:02 crc kubenswrapper[4840]: I1209 17:15:02.896799 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.093724 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume\") pod \"576e7f6a-1593-456a-8530-2fe9909fa1b3\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.093801 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume\") pod \"576e7f6a-1593-456a-8530-2fe9909fa1b3\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.093992 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7r6tq\" (UniqueName: \"kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq\") pod \"576e7f6a-1593-456a-8530-2fe9909fa1b3\" (UID: \"576e7f6a-1593-456a-8530-2fe9909fa1b3\") " Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.095025 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume" (OuterVolumeSpecName: "config-volume") pod "576e7f6a-1593-456a-8530-2fe9909fa1b3" (UID: "576e7f6a-1593-456a-8530-2fe9909fa1b3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.099088 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq" (OuterVolumeSpecName: "kube-api-access-7r6tq") pod "576e7f6a-1593-456a-8530-2fe9909fa1b3" (UID: "576e7f6a-1593-456a-8530-2fe9909fa1b3"). InnerVolumeSpecName "kube-api-access-7r6tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.101212 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "576e7f6a-1593-456a-8530-2fe9909fa1b3" (UID: "576e7f6a-1593-456a-8530-2fe9909fa1b3"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.195826 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7r6tq\" (UniqueName: \"kubernetes.io/projected/576e7f6a-1593-456a-8530-2fe9909fa1b3-kube-api-access-7r6tq\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.195881 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/576e7f6a-1593-456a-8530-2fe9909fa1b3-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.195900 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/576e7f6a-1593-456a-8530-2fe9909fa1b3-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.537635 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" event={"ID":"576e7f6a-1593-456a-8530-2fe9909fa1b3","Type":"ContainerDied","Data":"f4dcfb35904bab2da979881a02ef5d1fa604ae661bbd62bc130f81e34b9cf619"} Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.537693 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4dcfb35904bab2da979881a02ef5d1fa604ae661bbd62bc130f81e34b9cf619" Dec 09 17:15:03 crc kubenswrapper[4840]: I1209 17:15:03.537734 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l" Dec 09 17:15:04 crc kubenswrapper[4840]: I1209 17:15:04.036660 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:15:04 crc kubenswrapper[4840]: I1209 17:15:04.036717 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:15:06 crc kubenswrapper[4840]: I1209 17:15:06.643246 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-zgj4l" Dec 09 17:15:06 crc kubenswrapper[4840]: I1209 17:15:06.854187 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fmcphk" Dec 09 17:15:07 crc kubenswrapper[4840]: I1209 17:15:07.194400 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-866b78c4d6-gnpwh" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.138832 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:23 crc kubenswrapper[4840]: E1209 17:15:23.139692 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="576e7f6a-1593-456a-8530-2fe9909fa1b3" containerName="collect-profiles" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.139704 4840 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="576e7f6a-1593-456a-8530-2fe9909fa1b3" containerName="collect-profiles" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.139854 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="576e7f6a-1593-456a-8530-2fe9909fa1b3" containerName="collect-profiles" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.140645 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.143899 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.144124 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.144300 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-mql6s" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.152259 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.153290 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.213975 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.215573 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.217645 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.226790 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.289330 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.289510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5fz6\" (UniqueName: \"kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.390668 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.390711 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 
17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.390747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5fz6\" (UniqueName: \"kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.390788 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq2wf\" (UniqueName: \"kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.390828 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.391769 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.423802 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5fz6\" (UniqueName: \"kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6\") pod \"dnsmasq-dns-675f4bcbfc-54clj\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.459884 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.492006 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq2wf\" (UniqueName: \"kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.492107 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.492127 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.493025 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.493601 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.517897 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq2wf\" (UniqueName: \"kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf\") pod \"dnsmasq-dns-78dd6ddcc-vngnf\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.540482 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.928855 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:23 crc kubenswrapper[4840]: I1209 17:15:23.984597 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:23 crc kubenswrapper[4840]: W1209 17:15:23.989396 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14bb8342_d1b2_4dc4_a9d4_27ebe092be4c.slice/crio-1ec27355dc93e4164b93f0d18d19fbb7b04ecc5c84f31919f22e7ef60e110f59 WatchSource:0}: Error finding container 1ec27355dc93e4164b93f0d18d19fbb7b04ecc5c84f31919f22e7ef60e110f59: Status 404 returned error can't find the container with id 1ec27355dc93e4164b93f0d18d19fbb7b04ecc5c84f31919f22e7ef60e110f59 Dec 09 17:15:24 crc kubenswrapper[4840]: I1209 17:15:24.720787 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" event={"ID":"79caba5d-a685-43b0-a7b0-b0ca20543323","Type":"ContainerStarted","Data":"dd7ed559fe81c910c5876a0f5142914daf387237f7aea6a3c3172efc7b7b9531"} Dec 09 17:15:24 crc kubenswrapper[4840]: I1209 17:15:24.722335 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" event={"ID":"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c","Type":"ContainerStarted","Data":"1ec27355dc93e4164b93f0d18d19fbb7b04ecc5c84f31919f22e7ef60e110f59"} Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.325733 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.354460 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.357033 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.371387 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.544177 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.544332 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.544382 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd9cr\" (UniqueName: \"kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.620461 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.645757 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.645822 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd9cr\" (UniqueName: \"kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.645898 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.648574 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.648834 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.659175 
4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.661181 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.665944 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.701862 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd9cr\" (UniqueName: \"kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr\") pod \"dnsmasq-dns-666b6646f7-59s29\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.848785 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.849262 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.849314 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x96jx\" (UniqueName: \"kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.950366 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.950422 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.950465 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x96jx\" (UniqueName: \"kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.951191 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " 
pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.951201 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.979428 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:15:26 crc kubenswrapper[4840]: I1209 17:15:26.989332 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x96jx\" (UniqueName: \"kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx\") pod \"dnsmasq-dns-57d769cc4f-62c2p\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.279817 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.522053 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.523998 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.531869 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.531952 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.532350 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.532363 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gdrvc" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.532471 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.532567 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.534447 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.548925 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.558190 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:15:27 crc kubenswrapper[4840]: W1209 17:15:27.562361 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod956f73a1_e1d1_4cc8_aa72_d9be637417b1.slice/crio-306397de722f4cb84c5630cc14a6236e78ee360389803cf903ad543d23f7f6d6 WatchSource:0}: Error finding container 306397de722f4cb84c5630cc14a6236e78ee360389803cf903ad543d23f7f6d6: Status 404 returned error can't find the container with id 306397de722f4cb84c5630cc14a6236e78ee360389803cf903ad543d23f7f6d6 Dec 09 17:15:27 crc 
kubenswrapper[4840]: I1209 17:15:27.658956 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659047 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659092 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659110 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659130 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659520 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb8w2\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659597 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659621 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659637 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 
17:15:27.659660 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.659732 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.741098 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761039 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761096 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761175 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761193 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761260 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761297 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb8w2\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761324 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc 
kubenswrapper[4840]: I1209 17:15:27.761340 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761354 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761379 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.761418 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.762938 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.763620 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.764242 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.764306 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.764752 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.765931 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" 
event={"ID":"bae6b28b-2f89-44c6-b196-145e099e94e5","Type":"ContainerStarted","Data":"53501e3741eb5a9c69cb179a434b15d6a0a19951f09c462b5ab387c4978edd7d"} Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.768153 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.768184 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b98c978526173bf0076aa510cb6293cae39a106ae593fc3bcfda304b0021f1ac/globalmount\"" pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.768366 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.768539 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerStarted","Data":"306397de722f4cb84c5630cc14a6236e78ee360389803cf903ad543d23f7f6d6"} Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.771049 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.772328 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.772729 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.778853 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb8w2\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.802797 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.804269 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.807204 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.807284 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.807844 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.808062 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.809866 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.810096 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-k4twf" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.810172 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.819271 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.823268 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.854536 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964671 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964735 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964769 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964799 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964831 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964862 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964893 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5cgm\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964912 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964931 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964946 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:27 crc kubenswrapper[4840]: I1209 17:15:27.964984 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066677 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066723 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066766 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066806 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066833 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066852 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066871 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066893 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066918 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5cgm\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066935 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.066952 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.067362 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.080865 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.080910 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/52d5c339ee81f996f1e83b0d60c39ddb7d281130efbf18960868a994e4807c4a/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.081619 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.082171 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.084096 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.084202 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.084417 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.084642 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.085787 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.088812 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.088998 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5cgm\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.112289 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.155279 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:15:28 crc kubenswrapper[4840]: I1209 17:15:28.406085 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.063834 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.066619 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.074814 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.075029 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.077262 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.077532 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-klxx6" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.078107 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.081388 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.181610 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.181666 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.181822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.182011 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-default\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.182076 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-kolla-config\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.182154 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.182188 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85dfr\" (UniqueName: \"kubernetes.io/projected/71339989-bd1f-4da4-8976-62fbb767a30e-kube-api-access-85dfr\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.182235 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.284711 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.284797 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85dfr\" (UniqueName: \"kubernetes.io/projected/71339989-bd1f-4da4-8976-62fbb767a30e-kube-api-access-85dfr\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285170 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285215 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285248 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285284 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-default\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.285366 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-kolla-config\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.286633 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.286658 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.287278 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-config-data-default\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.288392 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71339989-bd1f-4da4-8976-62fbb767a30e-kolla-config\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.291105 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 
09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.291565 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.291610 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7571d52ce883e9a133f13cc8a46688ae3142e220eedc7e2e83e0ed7d7bf370fa/globalmount\"" pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.294410 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71339989-bd1f-4da4-8976-62fbb767a30e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.331723 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85dfr\" (UniqueName: \"kubernetes.io/projected/71339989-bd1f-4da4-8976-62fbb767a30e-kube-api-access-85dfr\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.340658 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3da20777-652b-47cc-89d7-aa98e44fd76b\") pod \"openstack-galera-0\" (UID: \"71339989-bd1f-4da4-8976-62fbb767a30e\") " pod="openstack/openstack-galera-0" Dec 09 17:15:29 crc kubenswrapper[4840]: I1209 17:15:29.386749 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.414435 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.417308 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.421111 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.421291 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-dcvrk" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.421559 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.426691 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.436374 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518589 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518656 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518682 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518704 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518752 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518801 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518830 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.518871 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpbdt\" (UniqueName: \"kubernetes.io/projected/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kube-api-access-jpbdt\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620045 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620086 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620107 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620156 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620197 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620216 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620259 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpbdt\" (UniqueName: \"kubernetes.io/projected/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kube-api-access-jpbdt\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.620292 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.621502 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.621707 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.621787 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.621951 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.625615 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.626827 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.637022 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpbdt\" (UniqueName: \"kubernetes.io/projected/e56689ef-4c1c-4775-9740-3e1ec3a0f4e8-kube-api-access-jpbdt\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.638090 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.638120 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/68dcc12f53a86cecaf7d6ddbfb1bb262d475ebefdc35a948ddd888393f8bf0e8/globalmount\"" pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.690903 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2a352fd-1fc4-4589-acb9-fc022ba62fde\") pod \"openstack-cell1-galera-0\" (UID: \"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.696838 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.697742 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.707425 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.707633 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-46298" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.707630 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.711559 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.746817 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.823361 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-config-data\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.823747 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.823804 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fkrq\" (UniqueName: \"kubernetes.io/projected/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kube-api-access-6fkrq\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.823844 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.823901 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kolla-config\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.925398 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kolla-config\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.925492 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-config-data\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.925518 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.925556 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fkrq\" (UniqueName: \"kubernetes.io/projected/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kube-api-access-6fkrq\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.925584 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.926335 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kolla-config\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.926394 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6cf8f20c-e36a-4ed4-b627-3b88423123c9-config-data\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.928916 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.931007 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cf8f20c-e36a-4ed4-b627-3b88423123c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:30 crc kubenswrapper[4840]: I1209 17:15:30.943012 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fkrq\" (UniqueName: \"kubernetes.io/projected/6cf8f20c-e36a-4ed4-b627-3b88423123c9-kube-api-access-6fkrq\") pod \"memcached-0\" (UID: \"6cf8f20c-e36a-4ed4-b627-3b88423123c9\") " pod="openstack/memcached-0" Dec 09 17:15:31 crc kubenswrapper[4840]: I1209 17:15:31.023102 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 09 17:15:31 crc kubenswrapper[4840]: I1209 17:15:31.801508 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerStarted","Data":"bd221d929f41d4077c0bace904da3e6daef22b54abc96d83269a2b79799ae9ca"} Dec 09 17:15:32 crc kubenswrapper[4840]: I1209 17:15:32.769287 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:15:32 crc kubenswrapper[4840]: I1209 17:15:32.933400 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:15:32 crc kubenswrapper[4840]: I1209 17:15:32.934649 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:15:32 crc kubenswrapper[4840]: I1209 17:15:32.936550 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-cstqc" Dec 09 17:15:32 crc kubenswrapper[4840]: I1209 17:15:32.941832 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.013948 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbpb2\" (UniqueName: \"kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2\") pod \"kube-state-metrics-0\" (UID: \"13330488-1e93-4a88-8f15-331ee0b935cf\") " pod="openstack/kube-state-metrics-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.114583 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbpb2\" (UniqueName: \"kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2\") pod \"kube-state-metrics-0\" (UID: \"13330488-1e93-4a88-8f15-331ee0b935cf\") " pod="openstack/kube-state-metrics-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.138870 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbpb2\" (UniqueName: \"kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2\") pod \"kube-state-metrics-0\" (UID: \"13330488-1e93-4a88-8f15-331ee0b935cf\") " pod="openstack/kube-state-metrics-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.253223 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.650939 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.653954 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.657361 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.657616 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.657841 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-jdxgh" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.658070 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.658241 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.671849 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726026 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726092 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726120 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726185 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv67f\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-kube-api-access-pv67f\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726253 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726285 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-out\") pod \"alertmanager-metric-storage-0\" (UID: 
\"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.726327 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.827619 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv67f\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-kube-api-access-pv67f\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.827894 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.827916 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.827949 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.828044 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.828074 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.828090 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.829036 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: 
\"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.833417 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.833432 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.838342 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.843721 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.844239 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/694a7e34-d1ce-4a1b-8475-fbb5d250b955-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.848714 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv67f\" (UniqueName: \"kubernetes.io/projected/694a7e34-d1ce-4a1b-8475-fbb5d250b955-kube-api-access-pv67f\") pod \"alertmanager-metric-storage-0\" (UID: \"694a7e34-d1ce-4a1b-8475-fbb5d250b955\") " pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:33 crc kubenswrapper[4840]: I1209 17:15:33.974620 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.036181 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.036248 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.036306 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.036992 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.037056 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f" gracePeriod=600 Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.236811 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.238589 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.242474 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.242510 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.243182 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.243397 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d4x6z" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.243458 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.244176 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.252133 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.334943 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335017 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335187 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335234 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335257 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335282 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tksfr\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335456 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.335521 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.436853 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437095 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437235 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437263 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437353 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " 
pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437371 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.437406 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tksfr\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.438032 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.440852 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.441402 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0" Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.441895 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.442136 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fdb12b4bbbb8ef13fe38e41860e07d7602896cea5ea735efd7d4e9b73bcc1e4d/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.442565 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.442919 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.448224 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.456898 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tksfr\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.472416 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.580630 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.826377 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f" exitCode=0
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.826418 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f"}
Dec 09 17:15:34 crc kubenswrapper[4840]: I1209 17:15:34.826749 4840 scope.go:117] "RemoveContainer" containerID="a086878e48cc9f08081b2c7308c271c8366e842b5fcd3ad6accfb78d0872a65b"
Dec 09 17:15:36 crc kubenswrapper[4840]: W1209 17:15:36.005089 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7170c3b2_9d93_4736_8ade_66423bc4a081.slice/crio-feaad1bcb61b2536e09deb3da000bf3106c627d71ae7508b017a0490aea616c4 WatchSource:0}: Error finding container feaad1bcb61b2536e09deb3da000bf3106c627d71ae7508b017a0490aea616c4: Status 404 returned error can't find the container with id feaad1bcb61b2536e09deb3da000bf3106c627d71ae7508b017a0490aea616c4
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.328394 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.852695 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerStarted","Data":"feaad1bcb61b2536e09deb3da000bf3106c627d71ae7508b017a0490aea616c4"}
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.898721 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-v78xq"]
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.899859 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq"
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.903379 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-7rhtj"
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.903679 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.903795 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.913852 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.915532 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.917128 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.920055 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.920402 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.920618 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.920778 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-ztptm" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.922383 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-v78xq"] Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.936982 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-6vxgb"] Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.938703 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.944739 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.963642 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6vxgb"] Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988481 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-ovn-controller-tls-certs\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988547 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988583 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-combined-ca-bundle\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988608 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8zqv\" (UniqueName: \"kubernetes.io/projected/637ab881-6952-409f-8e9d-619aaf72fb51-kube-api-access-n8zqv\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988657 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988685 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-log-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:36 crc kubenswrapper[4840]: I1209 17:15:36.988716 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/637ab881-6952-409f-8e9d-619aaf72fb51-scripts\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.092658 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.092901 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-log-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.092949 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/637ab881-6952-409f-8e9d-619aaf72fb51-scripts\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.092992 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093025 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-etc-ovs\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093055 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093079 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-log\") pod \"ovn-controller-ovs-6vxgb\" (UID: 
\"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093111 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3189254-8cff-481a-92f3-466a928de54e-scripts\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093144 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-run\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093176 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093225 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rd2g\" (UniqueName: \"kubernetes.io/projected/7b1009e9-391e-4a13-8d90-f55fb6c3b329-kube-api-access-2rd2g\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093250 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-lib\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093287 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-ovn-controller-tls-certs\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093315 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-config\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093341 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8144453e-7451-40db-b934-39dd15d5cd1f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8144453e-7451-40db-b934-39dd15d5cd1f\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093368 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx7bm\" (UniqueName: \"kubernetes.io/projected/d3189254-8cff-481a-92f3-466a928de54e-kube-api-access-zx7bm\") pod \"ovn-controller-ovs-6vxgb\" (UID: 
\"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093399 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093425 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-combined-ca-bundle\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093473 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093501 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8zqv\" (UniqueName: \"kubernetes.io/projected/637ab881-6952-409f-8e9d-619aaf72fb51-kube-api-access-n8zqv\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093520 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.093895 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-run\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.095602 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/637ab881-6952-409f-8e9d-619aaf72fb51-scripts\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.095828 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/637ab881-6952-409f-8e9d-619aaf72fb51-var-log-ovn\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.101151 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-combined-ca-bundle\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.126795 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8zqv\" (UniqueName: \"kubernetes.io/projected/637ab881-6952-409f-8e9d-619aaf72fb51-kube-api-access-n8zqv\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.129858 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/637ab881-6952-409f-8e9d-619aaf72fb51-ovn-controller-tls-certs\") pod \"ovn-controller-v78xq\" (UID: \"637ab881-6952-409f-8e9d-619aaf72fb51\") " pod="openstack/ovn-controller-v78xq" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.194853 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-run\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.194910 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.195115 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-run\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.194956 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rd2g\" (UniqueName: \"kubernetes.io/projected/7b1009e9-391e-4a13-8d90-f55fb6c3b329-kube-api-access-2rd2g\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196005 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-lib\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196054 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-config\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196085 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8144453e-7451-40db-b934-39dd15d5cd1f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8144453e-7451-40db-b934-39dd15d5cd1f\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " 
pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196112 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx7bm\" (UniqueName: \"kubernetes.io/projected/d3189254-8cff-481a-92f3-466a928de54e-kube-api-access-zx7bm\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196152 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196196 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196320 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196358 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-etc-ovs\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196392 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196422 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-log\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3189254-8cff-481a-92f3-466a928de54e-scripts\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.196764 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.197487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: 
\"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-etc-ovs\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.197946 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-lib\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.198082 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d3189254-8cff-481a-92f3-466a928de54e-var-log\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.198889 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3189254-8cff-481a-92f3-466a928de54e-scripts\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.200779 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.201912 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0" Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.202421 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.202617 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8144453e-7451-40db-b934-39dd15d5cd1f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8144453e-7451-40db-b934-39dd15d5cd1f\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2acc481122c910b3e0e4d18100dc48ba66ea94361e64473fa780fed057d6557c/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.202649 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1009e9-391e-4a13-8d90-f55fb6c3b329-config\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.206020 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.209124 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1009e9-391e-4a13-8d90-f55fb6c3b329-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.214894 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx7bm\" (UniqueName: \"kubernetes.io/projected/d3189254-8cff-481a-92f3-466a928de54e-kube-api-access-zx7bm\") pod \"ovn-controller-ovs-6vxgb\" (UID: \"d3189254-8cff-481a-92f3-466a928de54e\") " pod="openstack/ovn-controller-ovs-6vxgb"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.215934 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rd2g\" (UniqueName: \"kubernetes.io/projected/7b1009e9-391e-4a13-8d90-f55fb6c3b329-kube-api-access-2rd2g\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.237412 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8144453e-7451-40db-b934-39dd15d5cd1f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8144453e-7451-40db-b934-39dd15d5cd1f\") pod \"ovsdbserver-nb-0\" (UID: \"7b1009e9-391e-4a13-8d90-f55fb6c3b329\") " pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.260415 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.277627 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Dec 09 17:15:37 crc kubenswrapper[4840]: I1209 17:15:37.287349 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6vxgb"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.015618 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.017631 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.029087 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.029381 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.029653 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.030200 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-7z4hb"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.030335 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204088 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dztpb\" (UniqueName: \"kubernetes.io/projected/a4b9253d-0e13-4dd3-8b9a-7428281a743d-kube-api-access-dztpb\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204173 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204199 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204229 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-config\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204271 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204306 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204341 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.204382 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.305951 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dztpb\" (UniqueName: \"kubernetes.io/projected/a4b9253d-0e13-4dd3-8b9a-7428281a743d-kube-api-access-dztpb\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306063 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306088 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306124 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-config\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306179 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306255 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306294 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.306345 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.307120 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.307360 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-config\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.307710 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b9253d-0e13-4dd3-8b9a-7428281a743d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.309620 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.309664 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/125d7cbda85206c89d737c5079b113421f9c6b786a9646d3a7460968072cc851/globalmount\"" pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.317747 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.320893 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.323046 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b9253d-0e13-4dd3-8b9a-7428281a743d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
Dec 09 17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.326640 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dztpb\" (UniqueName: \"kubernetes.io/projected/a4b9253d-0e13-4dd3-8b9a-7428281a743d-kube-api-access-dztpb\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0"
17:15:42 crc kubenswrapper[4840]: I1209 17:15:42.354074 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8\") pod \"ovsdbserver-sb-0\" (UID: \"a4b9253d-0e13-4dd3-8b9a-7428281a743d\") " pod="openstack/ovsdbserver-sb-0" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.171455 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.897117 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn"] Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.898154 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.902149 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-distributor-http" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.902217 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-distributor-grpc" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.902315 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-ca-bundle" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.907601 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-config" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.907882 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-dockercfg-29bqr" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.928236 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn"] Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.984602 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.984652 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.984934 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.985018 4840 reconciler_common.go:245] 
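[Annotation] The csi_attacher.go record above ("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...") shows why the hostpath-provisioner PVC goes straight to SetUp: the driver does not advertise the staging capability, so the device-staging step is skipped. A minimal, illustrative Go sketch of that capability probe (not kubelet's actual code; the gRPC client `node` is an assumption):

// Probe the CSI driver's node service for STAGE_UNSTAGE_VOLUME. When the
// capability is absent, NodeStageVolume (kubelet's "MountDevice" step) can
// be skipped, as logged above for kubevirt.io.hostpath-provisioner.
package csiprobe

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func needsStaging(ctx context.Context, node csi.NodeClient) (bool, error) {
	resp, err := node.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true, nil // driver wants NodeStageVolume before publish
		}
	}
	return false, nil // skip MountDevice, publish directly
}

When staging is required, the log instead records a "MountVolume.MountDevice succeeded ... device mount path ..." step against the global mount directory before the per-pod SetUp, which is exactly the ordering visible above for pvc-3ee5d8be-9313-44b1-bc64-88cd05ff95a8.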
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:43 crc kubenswrapper[4840]: I1209 17:15:43.985042 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qr6r\" (UniqueName: \"kubernetes.io/projected/be53cf19-ee08-4a03-96d8-5899cd1f59ec-kube-api-access-4qr6r\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.086201 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.086250 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.086321 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.086345 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.087331 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.087463 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be53cf19-ee08-4a03-96d8-5899cd1f59ec-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 
09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.086361 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qr6r\" (UniqueName: \"kubernetes.io/projected/be53cf19-ee08-4a03-96d8-5899cd1f59ec-kube-api-access-4qr6r\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.093337 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.095448 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/be53cf19-ee08-4a03-96d8-5899cd1f59ec-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.117178 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qr6r\" (UniqueName: \"kubernetes.io/projected/be53cf19-ee08-4a03-96d8-5899cd1f59ec-kube-api-access-4qr6r\") pod \"cloudkitty-lokistack-distributor-664b687b54-mtfrn\" (UID: \"be53cf19-ee08-4a03-96d8-5899cd1f59ec\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.158366 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.159404 4840 util.go:30] "No sandbox for pod can be found. 
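[Annotation] Every volume above follows the same three-phase pattern: VerifyControllerAttachedVolume (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), then MountVolume.SetUp succeeded (operation_generator.go:637). This is the kubelet volume manager reconciling its desired state of world (volumes the scheduled pods need) against its actual state (volumes already mounted). A minimal sketch of that reconcile pattern, with assumed names rather than kubelet's actual types:

// One reconcile pass: mount whatever is desired but not yet actual,
// emitting the same started/succeeded pair seen in the records above.
package volreconcile

import "log"

type Volume struct {
	UniqueName string // e.g. "kubernetes.io/configmap/<uid>-config"
	PodName    string
}

type mounter interface {
	SetUp(v Volume) error // one MountVolume.SetUp attempt
}

func reconcile(desired []Volume, mounted map[string]bool, m mounter) {
	for _, v := range desired {
		if mounted[v.UniqueName] {
			continue // already in the actual state of world
		}
		log.Printf("operationExecutor.MountVolume started for volume %q pod %q", v.UniqueName, v.PodName)
		if err := m.SetUp(v); err != nil {
			log.Printf("MountVolume.SetUp failed for volume %q: %v", v.UniqueName, err)
			continue // left desired-but-unmounted; retried next pass
		}
		mounted[v.UniqueName] = true
		log.Printf("MountVolume.SetUp succeeded for volume %q pod %q", v.UniqueName, v.PodName)
	}
}

Running the loop repeatedly is what makes the pattern idempotent: volumes that mounted on a previous pass are skipped, and failures (like the tls-secret errors further below) simply stay in the desired set until they succeed.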
Need to start a new one" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.162376 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-querier-grpc" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.162475 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-loki-s3" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.162568 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-querier-http" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188526 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188602 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188621 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188641 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pfxg\" (UniqueName: \"kubernetes.io/projected/4cf23999-5210-4106-aa05-9ac1c07da2a1-kube-api-access-8pfxg\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188852 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.188914 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.189947 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88"] Dec 09 
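[Annotation] The reflector.go:368 "Caches populated for *v1.Secret ..." records above correspond to the kubelet starting a watch-backed cache for each Secret or ConfigMap a newly admitted pod references, one object at a time. An illustrative client-go sketch of such a single-object watch (namespace and name are assumptions for the sketch, not kubelet internals):

// Start a watch-backed cache restricted to one named Secret; returning
// after WaitForCacheSync is the moment a "Caches populated" record marks.
package secretwatch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func watchSecret(ctx context.Context, cs kubernetes.Interface, ns, name string) cache.SharedIndexInformer {
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs,
		10*time.Minute, // resync period (arbitrary for the sketch)
		informers.WithNamespace(ns),
		informers.WithTweakListOptions(func(o *metav1.ListOptions) {
			// Restrict the list/watch to the single object we care about.
			o.FieldSelector = "metadata.name=" + name
		}),
	)
	inf := factory.Core().V1().Secrets().Informer()
	factory.Start(ctx.Done())
	cache.WaitForCacheSync(ctx.Done(), inf.HasSynced) // initial list done
	return inf
}

Populating these caches before mounting is why the MountVolume.SetUp records for secret-backed volumes can succeed milliseconds later without a synchronous API GET per volume.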
17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.214111 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.242067 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.243330 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.247844 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-query-frontend-grpc" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.251299 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-query-frontend-http" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.261455 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290205 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290242 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290277 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290305 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290328 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290354 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290382 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-http\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290413 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290432 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pfxg\" (UniqueName: \"kubernetes.io/projected/4cf23999-5210-4106-aa05-9ac1c07da2a1-kube-api-access-8pfxg\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.290490 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj4g5\" (UniqueName: \"kubernetes.io/projected/7838bb86-5c5f-4100-aa6c-442e1e591645-kube-api-access-vj4g5\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.291122 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.291252 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf23999-5210-4106-aa05-9ac1c07da2a1-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 
17:15:44.303925 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.312551 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.312765 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/4cf23999-5210-4106-aa05-9ac1c07da2a1-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.319452 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pfxg\" (UniqueName: \"kubernetes.io/projected/4cf23999-5210-4106-aa05-9ac1c07da2a1-kube-api-access-8pfxg\") pod \"cloudkitty-lokistack-querier-5467947bf7-xzc88\" (UID: \"4cf23999-5210-4106-aa05-9ac1c07da2a1\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.354772 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.355811 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361050 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-gateway-ca-bundle" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361068 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-gateway" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361182 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-client-http" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361371 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361581 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-http" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.361778 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-ca" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.369164 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.370683 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.375211 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-dockercfg-sm6dg" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.382183 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392498 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392570 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392644 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392682 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392731 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmhq2\" (UniqueName: \"kubernetes.io/projected/64550645-76ad-4518-ad64-74d530e0a4f1-kube-api-access-lmhq2\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392766 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " 
pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392817 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392879 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.392916 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393034 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393077 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393138 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393188 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393232 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-http\") pod 
\"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393291 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393355 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393394 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snnhz\" (UniqueName: \"kubernetes.io/projected/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-kube-api-access-snnhz\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393451 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393483 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393534 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393564 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.393628 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj4g5\" 
(UniqueName: \"kubernetes.io/projected/7838bb86-5c5f-4100-aa6c-442e1e591645-kube-api-access-vj4g5\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.394535 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.395301 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7838bb86-5c5f-4100-aa6c-442e1e591645-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.400362 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp"] Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.406049 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-http\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.412518 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/7838bb86-5c5f-4100-aa6c-442e1e591645-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.413082 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj4g5\" (UniqueName: \"kubernetes.io/projected/7838bb86-5c5f-4100-aa6c-442e1e591645-kube-api-access-vj4g5\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5\" (UID: \"7838bb86-5c5f-4100-aa6c-442e1e591645\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.478569 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495649 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495700 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495742 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495768 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495797 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snnhz\" (UniqueName: \"kubernetes.io/projected/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-kube-api-access-snnhz\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495818 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495835 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495857 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " 
pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495875 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495924 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495939 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495953 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.495984 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.496007 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.496029 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmhq2\" (UniqueName: \"kubernetes.io/projected/64550645-76ad-4518-ad64-74d530e0a4f1-kube-api-access-lmhq2\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.496052 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.496095 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.496117 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: E1209 17:15:44.497101 4840 secret.go:188] Couldn't get secret openstack/cloudkitty-lokistack-gateway-http: secret "cloudkitty-lokistack-gateway-http" not found Dec 09 17:15:44 crc kubenswrapper[4840]: E1209 17:15:44.497183 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret podName:4e1e443c-6d35-4788-8cd7-dae8911ffc1e nodeName:}" failed. No retries permitted until 2025-12-09 17:15:44.997160125 +0000 UTC m=+1130.988270758 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret") pod "cloudkitty-lokistack-gateway-bc75944f-b9cg8" (UID: "4e1e443c-6d35-4788-8cd7-dae8911ffc1e") : secret "cloudkitty-lokistack-gateway-http" not found Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.497115 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: E1209 17:15:44.497837 4840 secret.go:188] Couldn't get secret openstack/cloudkitty-lokistack-gateway-http: secret "cloudkitty-lokistack-gateway-http" not found Dec 09 17:15:44 crc kubenswrapper[4840]: E1209 17:15:44.497916 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret podName:64550645-76ad-4518-ad64-74d530e0a4f1 nodeName:}" failed. No retries permitted until 2025-12-09 17:15:44.997899237 +0000 UTC m=+1130.989009870 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret") pod "cloudkitty-lokistack-gateway-bc75944f-fj2hp" (UID: "64550645-76ad-4518-ad64-74d530e0a4f1") : secret "cloudkitty-lokistack-gateway-http" not found Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.498265 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.498506 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.498575 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.498732 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.498751 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.500348 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.500880 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.502320 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-client-http\") pod 
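[Annotation] The two E1209 records above show the retry gate for failed mounts: the tls-secret SetUp fails because secret "cloudkitty-lokistack-gateway-http" has not been created yet, and nestedpendingoperations bars retries for durationBeforeRetry (500ms on this first failure), with the delay growing on consecutive failures. A minimal sketch of that backoff bookkeeping, assuming only what the records show (the 2-minute cap is an assumption):

package mountretry

import "time"

const (
	initialDelay = 500 * time.Millisecond // matches "durationBeforeRetry 500ms" above
	maxDelay     = 2 * time.Minute        // cap assumed for this sketch
)

type pendingOp struct {
	failures  int
	notBefore time.Time // "No retries permitted until ..."
}

// fail records one failed MountVolume.SetUp attempt and pushes the next
// permitted attempt out by an exponentially growing delay.
func (p *pendingOp) fail(now time.Time) {
	delay := initialDelay
	for i := 0; i < p.failures && delay < maxDelay; i++ {
		delay *= 2
	}
	p.failures++
	p.notBefore = now.Add(delay)
}

// ready reports whether another attempt is permitted at time now.
func (p *pendingOp) ready(now time.Time) bool { return !now.Before(p.notBefore) }

The error here is transient: the missing secret is evidently created moments later (presumably by the operator managing the LokiStack certificates), and the retried tls-secret mounts for both gateway pods succeed at 17:15:45.007/45.008 below.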
\"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.503271 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.506528 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.507254 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.507625 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64550645-76ad-4518-ad64-74d530e0a4f1-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.507934 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.514400 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmhq2\" (UniqueName: \"kubernetes.io/projected/64550645-76ad-4518-ad64-74d530e0a4f1-kube-api-access-lmhq2\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.515180 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snnhz\" (UniqueName: \"kubernetes.io/projected/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-kube-api-access-snnhz\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:44 crc kubenswrapper[4840]: I1209 17:15:44.565303 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.003703 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.003848 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.007188 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4e1e443c-6d35-4788-8cd7-dae8911ffc1e-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-b9cg8\" (UID: \"4e1e443c-6d35-4788-8cd7-dae8911ffc1e\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.008479 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64550645-76ad-4518-ad64-74d530e0a4f1-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-fj2hp\" (UID: \"64550645-76ad-4518-ad64-74d530e0a4f1\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.145478 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.147458 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.151110 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-ingester-http" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.151339 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-ingester-grpc" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.159751 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.237783 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.239571 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.241423 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-compactor-http" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.241641 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-compactor-grpc" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.254298 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.276766 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.293094 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.308291 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.310657 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314393 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314482 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314595 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314681 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314711 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314867 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.314947 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrldj\" (UniqueName: \"kubernetes.io/projected/b45f4212-4ee0-4679-b115-d8d231bf946d-kube-api-access-hrldj\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.315069 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.318408 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-index-gateway-grpc" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.319386 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-index-gateway-http" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.324036 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.416920 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417010 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417037 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417157 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417209 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417272 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417507 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417503 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417635 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417665 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417892 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.417910 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrldj\" (UniqueName: \"kubernetes.io/projected/b45f4212-4ee0-4679-b115-d8d231bf946d-kube-api-access-hrldj\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.418170 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-grpc\") pod 
\"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.418371 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.418416 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-http\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.420544 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421312 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421391 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421470 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421520 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421567 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421667 
4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfmnj\" (UniqueName: \"kubernetes.io/projected/374f81b4-2b45-4e8a-9b41-898b64e5623f-kube-api-access-mfmnj\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421766 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25qjz\" (UniqueName: \"kubernetes.io/projected/3049524c-ff2b-4c18-baf0-c15c182583cc-kube-api-access-25qjz\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421797 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.421862 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.422789 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.422943 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.423661 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.425531 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/b45f4212-4ee0-4679-b115-d8d231bf946d-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.450013 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrldj\" (UniqueName: \"kubernetes.io/projected/b45f4212-4ee0-4679-b115-d8d231bf946d-kube-api-access-hrldj\") pod \"cloudkitty-lokistack-ingester-0\" (UID: 
\"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.463261 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.464684 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"b45f4212-4ee0-4679-b115-d8d231bf946d\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.513948 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523369 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523432 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523465 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523505 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfmnj\" (UniqueName: \"kubernetes.io/projected/374f81b4-2b45-4e8a-9b41-898b64e5623f-kube-api-access-mfmnj\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523548 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25qjz\" (UniqueName: \"kubernetes.io/projected/3049524c-ff2b-4c18-baf0-c15c182583cc-kube-api-access-25qjz\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523583 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523593 4840 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523762 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523816 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523885 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.523982 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.524300 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.524336 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.524425 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-grpc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.524487 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-http\") pod 
\"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.524517 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.525175 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.525329 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.525911 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.526008 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.531901 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-grpc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.532093 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.532161 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.532729 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/374f81b4-2b45-4e8a-9b41-898b64e5623f-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.534191 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.545197 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25qjz\" (UniqueName: \"kubernetes.io/projected/3049524c-ff2b-4c18-baf0-c15c182583cc-kube-api-access-25qjz\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.548487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfmnj\" (UniqueName: \"kubernetes.io/projected/374f81b4-2b45-4e8a-9b41-898b64e5623f-kube-api-access-mfmnj\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.553903 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.561618 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"374f81b4-2b45-4e8a-9b41-898b64e5623f\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:45 crc kubenswrapper[4840]: I1209 17:15:45.640271 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:15:46 crc kubenswrapper[4840]: I1209 17:15:46.784530 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/3049524c-ff2b-4c18-baf0-c15c182583cc-cloudkitty-lokistack-compactor-http\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"3049524c-ff2b-4c18-baf0-c15c182583cc\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:47 crc kubenswrapper[4840]: I1209 17:15:47.063348 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:15:49 crc kubenswrapper[4840]: W1209 17:15:49.308290 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cf8f20c_e36a_4ed4_b627_3b88423123c9.slice/crio-735e114c01e91c78809c536689a736179ea47b43fbc8343579dc8ffae0b7a939 WatchSource:0}: Error finding container 735e114c01e91c78809c536689a736179ea47b43fbc8343579dc8ffae0b7a939: Status 404 returned error can't find the container with id 735e114c01e91c78809c536689a736179ea47b43fbc8343579dc8ffae0b7a939 Dec 09 17:15:49 crc kubenswrapper[4840]: I1209 17:15:49.740886 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 17:15:50 crc kubenswrapper[4840]: I1209 17:15:50.269354 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6cf8f20c-e36a-4ed4-b627-3b88423123c9","Type":"ContainerStarted","Data":"735e114c01e91c78809c536689a736179ea47b43fbc8343579dc8ffae0b7a939"} Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.454245 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.454418 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x96jx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-62c2p_openstack(bae6b28b-2f89-44c6-b196-145e099e94e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.456489 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" podUID="bae6b28b-2f89-44c6-b196-145e099e94e5" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.458423 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.458594 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q5fz6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-54clj_openstack(79caba5d-a685-43b0-a7b0-b0ca20543323): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.459904 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" podUID="79caba5d-a685-43b0-a7b0-b0ca20543323" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.474179 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.474315 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qq2wf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-vngnf_openstack(14bb8342-d1b2-4dc4-a9d4-27ebe092be4c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.475416 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" podUID="14bb8342-d1b2-4dc4-a9d4-27ebe092be4c" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.492518 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.492662 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dd9cr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-59s29_openstack(956f73a1-e1d1-4cc8-aa72-d9be637417b1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:15:50 crc kubenswrapper[4840]: E1209 17:15:50.494033 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-59s29" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" Dec 09 17:15:50 crc kubenswrapper[4840]: I1209 17:15:50.916062 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 09 17:15:51 crc kubenswrapper[4840]: I1209 17:15:51.001142 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 09 17:15:51 crc kubenswrapper[4840]: I1209 17:15:51.063748 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5"] Dec 09 17:15:51 crc kubenswrapper[4840]: I1209 17:15:51.284522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8","Type":"ContainerStarted","Data":"82f7a5cb15e121ad3de7e1d90e5909ed6e1da2f76b09f470fe40e80bf8dbd581"} Dec 09 17:15:51 crc kubenswrapper[4840]: E1209 17:15:51.287199 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-59s29" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" Dec 09 17:15:51 crc kubenswrapper[4840]: E1209 17:15:51.287830 4840 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" podUID="bae6b28b-2f89-44c6-b196-145e099e94e5" Dec 09 17:15:51 crc kubenswrapper[4840]: E1209 17:15:51.923909 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 09 17:15:51 crc kubenswrapper[4840]: E1209 17:15:51.924398 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jb8w2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(9b2bc342-2987-4fc2-b078-bc5aa00c063d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:15:51 crc kubenswrapper[4840]: E1209 17:15:51.925903 
4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.105773 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.124281 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.147364 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config\") pod \"79caba5d-a685-43b0-a7b0-b0ca20543323\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.147544 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5fz6\" (UniqueName: \"kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6\") pod \"79caba5d-a685-43b0-a7b0-b0ca20543323\" (UID: \"79caba5d-a685-43b0-a7b0-b0ca20543323\") " Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.149222 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config" (OuterVolumeSpecName: "config") pod "79caba5d-a685-43b0-a7b0-b0ca20543323" (UID: "79caba5d-a685-43b0-a7b0-b0ca20543323"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.153666 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6" (OuterVolumeSpecName: "kube-api-access-q5fz6") pod "79caba5d-a685-43b0-a7b0-b0ca20543323" (UID: "79caba5d-a685-43b0-a7b0-b0ca20543323"). InnerVolumeSpecName "kube-api-access-q5fz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.248805 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config\") pod \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.248916 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc\") pod \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.249016 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq2wf\" (UniqueName: \"kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf\") pod \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\" (UID: \"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c\") " Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.249340 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config" (OuterVolumeSpecName: "config") pod "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c" (UID: "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.249688 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c" (UID: "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.250667 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.250682 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.250691 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5fz6\" (UniqueName: \"kubernetes.io/projected/79caba5d-a685-43b0-a7b0-b0ca20543323-kube-api-access-q5fz6\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.250701 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79caba5d-a685-43b0-a7b0-b0ca20543323-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.252208 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf" (OuterVolumeSpecName: "kube-api-access-qq2wf") pod "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c" (UID: "14bb8342-d1b2-4dc4-a9d4-27ebe092be4c"). InnerVolumeSpecName "kube-api-access-qq2wf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.292291 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"71339989-bd1f-4da4-8976-62fbb767a30e","Type":"ContainerStarted","Data":"0b5267d7f1c9bbd191f4c4c20356a17d49d843c8e7b4120d3b523e299b65ed43"} Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.293575 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" event={"ID":"14bb8342-d1b2-4dc4-a9d4-27ebe092be4c","Type":"ContainerDied","Data":"1ec27355dc93e4164b93f0d18d19fbb7b04ecc5c84f31919f22e7ef60e110f59"} Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.293611 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-vngnf" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.294567 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" event={"ID":"79caba5d-a685-43b0-a7b0-b0ca20543323","Type":"ContainerDied","Data":"dd7ed559fe81c910c5876a0f5142914daf387237f7aea6a3c3172efc7b7b9531"} Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.294586 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-54clj" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.295989 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"694a7e34-d1ce-4a1b-8475-fbb5d250b955","Type":"ContainerStarted","Data":"4ee2b98987f46898d9b742683c8995934847bbb511526eaafbbfa3178634d029"} Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.297602 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" event={"ID":"7838bb86-5c5f-4100-aa6c-442e1e591645","Type":"ContainerStarted","Data":"ad9676c242a3bc84428f387eaf261c808ccb92ad10e5e2ee22b473c3545b992e"} Dec 09 17:15:52 crc kubenswrapper[4840]: E1209 17:15:52.300030 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.353497 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq2wf\" (UniqueName: \"kubernetes.io/projected/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c-kube-api-access-qq2wf\") on node \"crc\" DevicePath \"\"" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.386109 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.409029 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-vngnf"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.438844 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.462364 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-54clj"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.472428 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-v78xq"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 
17:15:52.483699 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp"] Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.623930 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14bb8342-d1b2-4dc4-a9d4-27ebe092be4c" path="/var/lib/kubelet/pods/14bb8342-d1b2-4dc4-a9d4-27ebe092be4c/volumes" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.624766 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79caba5d-a685-43b0-a7b0-b0ca20543323" path="/var/lib/kubelet/pods/79caba5d-a685-43b0-a7b0-b0ca20543323/volumes" Dec 09 17:15:52 crc kubenswrapper[4840]: I1209 17:15:52.828064 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6vxgb"] Dec 09 17:15:53 crc kubenswrapper[4840]: W1209 17:15:53.571512 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64550645_76ad_4518_ad64_74d530e0a4f1.slice/crio-2bcf3d4fef11ba4332a34e89203e119df42ac290f17a33a45900e9b6437cbf61 WatchSource:0}: Error finding container 2bcf3d4fef11ba4332a34e89203e119df42ac290f17a33a45900e9b6437cbf61: Status 404 returned error can't find the container with id 2bcf3d4fef11ba4332a34e89203e119df42ac290f17a33a45900e9b6437cbf61 Dec 09 17:15:53 crc kubenswrapper[4840]: W1209 17:15:53.577620 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3189254_8cff_481a_92f3_466a928de54e.slice/crio-3919d9a48ac1186d1aaa2eac395eb0e6b328be1885ab46166f30893a80001329 WatchSource:0}: Error finding container 3919d9a48ac1186d1aaa2eac395eb0e6b328be1885ab46166f30893a80001329: Status 404 returned error can't find the container with id 3919d9a48ac1186d1aaa2eac395eb0e6b328be1885ab46166f30893a80001329 Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.212461 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: W1209 17:15:54.222512 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4b9253d_0e13_4dd3_8b9a_7428281a743d.slice/crio-1072ab837fbbd9f4a02a848ffbbedf1f6858624329b3a44876f02273967cfd48 WatchSource:0}: Error finding container 1072ab837fbbd9f4a02a848ffbbedf1f6858624329b3a44876f02273967cfd48: Status 404 returned error can't find the container with id 1072ab837fbbd9f4a02a848ffbbedf1f6858624329b3a44876f02273967cfd48 Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.326758 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01"} Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.331831 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-v78xq" event={"ID":"637ab881-6952-409f-8e9d-619aaf72fb51","Type":"ContainerStarted","Data":"17c7ab537df5411b303e8a8aa8b29b31564e68b4ead6fc2874a20d6805464158"} Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.333614 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6cf8f20c-e36a-4ed4-b627-3b88423123c9","Type":"ContainerStarted","Data":"768fc79946fe3263989075338b5cc84c6e577363ff5f7d0bca2b880f77785b5b"} Dec 09 17:15:54 crc kubenswrapper[4840]: 
I1209 17:15:54.334004 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.335503 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6vxgb" event={"ID":"d3189254-8cff-481a-92f3-466a928de54e","Type":"ContainerStarted","Data":"3919d9a48ac1186d1aaa2eac395eb0e6b328be1885ab46166f30893a80001329"} Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.337155 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" event={"ID":"64550645-76ad-4518-ad64-74d530e0a4f1","Type":"ContainerStarted","Data":"2bcf3d4fef11ba4332a34e89203e119df42ac290f17a33a45900e9b6437cbf61"} Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.348283 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a4b9253d-0e13-4dd3-8b9a-7428281a743d","Type":"ContainerStarted","Data":"1072ab837fbbd9f4a02a848ffbbedf1f6858624329b3a44876f02273967cfd48"} Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.366724 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=19.999650718 podStartE2EDuration="24.366700388s" podCreationTimestamp="2025-12-09 17:15:30 +0000 UTC" firstStartedPulling="2025-12-09 17:15:49.310836334 +0000 UTC m=+1135.301946967" lastFinishedPulling="2025-12-09 17:15:53.677886004 +0000 UTC m=+1139.668996637" observedRunningTime="2025-12-09 17:15:54.355592848 +0000 UTC m=+1140.346703501" watchObservedRunningTime="2025-12-09 17:15:54.366700388 +0000 UTC m=+1140.357811021" Dec 09 17:15:54 crc kubenswrapper[4840]: W1209 17:15:54.382813 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e1e443c_6d35_4788_8cd7_dae8911ffc1e.slice/crio-4e3abc839f0eddcb0e9378f9b2c369de8c8e25d5427d3d4a803f5313997c2f78 WatchSource:0}: Error finding container 4e3abc839f0eddcb0e9378f9b2c369de8c8e25d5427d3d4a803f5313997c2f78: Status 404 returned error can't find the container with id 4e3abc839f0eddcb0e9378f9b2c369de8c8e25d5427d3d4a803f5313997c2f78 Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.391286 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8"] Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.405653 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.858925 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.881087 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: W1209 17:15:54.897459 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod374f81b4_2b45_4e8a_9b41_898b64e5623f.slice/crio-71ff072c1df6abd77148072cd32a4d2fcc6ca7d12ffbb37cdc5449062a3a22ab WatchSource:0}: Error finding container 71ff072c1df6abd77148072cd32a4d2fcc6ca7d12ffbb37cdc5449062a3a22ab: Status 404 returned error can't find the container with id 71ff072c1df6abd77148072cd32a4d2fcc6ca7d12ffbb37cdc5449062a3a22ab Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.901794 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88"] Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.912037 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn"] Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.921302 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: W1209 17:15:54.943132 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cf23999_5210_4106_aa05_9ac1c07da2a1.slice/crio-4d0b9b2c8cc81173da35a038ced1f0592e85b556b2f279db6ed602a1b9de6043 WatchSource:0}: Error finding container 4d0b9b2c8cc81173da35a038ced1f0592e85b556b2f279db6ed602a1b9de6043: Status 404 returned error can't find the container with id 4d0b9b2c8cc81173da35a038ced1f0592e85b556b2f279db6ed602a1b9de6043 Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.954074 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 17:15:54 crc kubenswrapper[4840]: W1209 17:15:54.959579 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe53cf19_ee08_4a03_96d8_5899cd1f59ec.slice/crio-3df096dda1c696464ce1fe398fefc1970c662870ac1e5d9b7684b4006f881c51 WatchSource:0}: Error finding container 3df096dda1c696464ce1fe398fefc1970c662870ac1e5d9b7684b4006f881c51: Status 404 returned error can't find the container with id 3df096dda1c696464ce1fe398fefc1970c662870ac1e5d9b7684b4006f881c51 Dec 09 17:15:54 crc kubenswrapper[4840]: I1209 17:15:54.963856 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.416798 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7b1009e9-391e-4a13-8d90-f55fb6c3b329","Type":"ContainerStarted","Data":"1244ae192fc8cd1b37710db2f55d24621d1d03eaa66abc2d5a6c9184ebeb9caf"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.423132 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"13330488-1e93-4a88-8f15-331ee0b935cf","Type":"ContainerStarted","Data":"6274cdbe34fbcdb610c4c75f363a6dcd672e44140554acd80c3ebd84a5519342"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.425710 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-compactor-0" event={"ID":"3049524c-ff2b-4c18-baf0-c15c182583cc","Type":"ContainerStarted","Data":"cc6daf10ff26b4722b4471648fa79ea6667ae348355fdd6733399a67648f2df5"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.434034 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-index-gateway-0" event={"ID":"374f81b4-2b45-4e8a-9b41-898b64e5623f","Type":"ContainerStarted","Data":"71ff072c1df6abd77148072cd32a4d2fcc6ca7d12ffbb37cdc5449062a3a22ab"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.437006 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" event={"ID":"4cf23999-5210-4106-aa05-9ac1c07da2a1","Type":"ContainerStarted","Data":"4d0b9b2c8cc81173da35a038ced1f0592e85b556b2f279db6ed602a1b9de6043"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.470730 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerStarted","Data":"85fe337a4ed08a14d08de3b6fac86b7d5fe320f46633b88e075256ca9d971826"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.472606 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-ingester-0" event={"ID":"b45f4212-4ee0-4679-b115-d8d231bf946d","Type":"ContainerStarted","Data":"680feb6c1e9e87d81871954559a07c5ded6c0e1008278dda9430595b0703f696"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.482235 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" event={"ID":"be53cf19-ee08-4a03-96d8-5899cd1f59ec","Type":"ContainerStarted","Data":"3df096dda1c696464ce1fe398fefc1970c662870ac1e5d9b7684b4006f881c51"} Dec 09 17:15:55 crc kubenswrapper[4840]: I1209 17:15:55.496058 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" event={"ID":"4e1e443c-6d35-4788-8cd7-dae8911ffc1e","Type":"ContainerStarted","Data":"4e3abc839f0eddcb0e9378f9b2c369de8c8e25d5427d3d4a803f5313997c2f78"} Dec 09 17:15:56 crc kubenswrapper[4840]: I1209 17:15:56.507206 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerStarted","Data":"2365d04c83aa2f9dd63e9cee92a15b62873fc69fa0193a589f63c05ec42bf785"} Dec 09 17:16:01 crc kubenswrapper[4840]: I1209 17:16:01.026298 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.287054 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.337671 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.339361 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.373521 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.498085 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.498265 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.498348 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4fw7\" (UniqueName: \"kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.603473 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4fw7\" (UniqueName: \"kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.605579 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.605844 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.607602 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.608150 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.630603 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4fw7\" (UniqueName: 
\"kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7\") pod \"dnsmasq-dns-7cb5889db5-bkfkl\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:03 crc kubenswrapper[4840]: I1209 17:16:03.680607 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.502797 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.511861 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.519144 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.519500 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.519649 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.520356 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-8p95b" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.531505 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.635803 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5mqf\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-kube-api-access-c5mqf\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.635868 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.635941 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.636032 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-cache\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.636068 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-lock\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.738379 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-c5mqf\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-kube-api-access-c5mqf\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.738451 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.738541 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.738639 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-cache\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.738684 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-lock\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: E1209 17:16:04.738698 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:04 crc kubenswrapper[4840]: E1209 17:16:04.738750 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:04 crc kubenswrapper[4840]: E1209 17:16:04.738836 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:05.238786742 +0000 UTC m=+1151.229897385 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.739337 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-cache\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.739396 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-lock\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.757548 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5mqf\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-kube-api-access-c5mqf\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.762130 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.762160 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cb705ee31a3dd36a14bde0b076635b38b007d0420eed91be3c42e08b34854439/globalmount\"" pod="openstack/swift-storage-0" Dec 09 17:16:04 crc kubenswrapper[4840]: I1209 17:16:04.799641 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2f66a5de-edfa-4297-9080-d49ac4ab0082\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.032555 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-2xsrv"] Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.036003 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.038233 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.038440 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.038530 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.063671 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-2xsrv"] Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.151813 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.151871 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.151917 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.151993 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc9p7\" (UniqueName: \"kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.152023 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.152054 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.152117 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 
17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253561 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253630 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253664 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253765 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc9p7\" (UniqueName: \"kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253801 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253831 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.253896 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: E1209 17:16:05.254386 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:05 crc kubenswrapper[4840]: E1209 17:16:05.254426 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.254454 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: E1209 17:16:05.254489 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:06.254466561 +0000 UTC m=+1152.245577254 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.254940 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.255922 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.259381 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.269866 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.270713 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.272634 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc9p7\" (UniqueName: \"kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7\") pod \"swift-ring-rebalance-2xsrv\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:05 crc kubenswrapper[4840]: I1209 17:16:05.361869 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:06 crc kubenswrapper[4840]: I1209 17:16:06.277850 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:06 crc kubenswrapper[4840]: E1209 17:16:06.278128 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:06 crc kubenswrapper[4840]: E1209 17:16:06.279880 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:06 crc kubenswrapper[4840]: E1209 17:16:06.279985 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:08.27992334 +0000 UTC m=+1154.271033973 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.119730 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.119898 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:loki-query-frontend,Image:registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7,Command:[],Args:[-target=query-frontend -config.file=/etc/loki/config/config.yaml -runtime-config.file=/etc/loki/config/runtime-config.yaml 
-config.expand-env=true],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:3100,Protocol:TCP,HostIP:,},ContainerPort{Name:grpclb,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:healthchecks,HostPort:0,ContainerPort:3101,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/loki/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-query-frontend-http,ReadOnly:false,MountPath:/var/run/tls/http/server,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-query-frontend-grpc,ReadOnly:false,MountPath:/var/run/tls/grpc/server,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-ca-bundle,ReadOnly:false,MountPath:/var/run/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vj4g5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/loki/api/v1/status/buildinfo,Port:{0 3101 },Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:2,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/loki/api/v1/status/buildinfo,Port:{0 3101 },Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5_openstack(7838bb86-5c5f-4100-aa6c-442e1e591645): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.121095 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"loki-query-frontend\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" podUID="7838bb86-5c5f-4100-aa6c-442e1e591645" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.142306 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.142461 4840 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n9h674h9fh699h698h56bh654h5b8h59bh544h7dh5fbh5f5h5fch7fhc9hc9h684hb9h585h689h6h87h67fh84h67bh697h7dh544h649h675h5dcq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zx7bm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-6vxgb_openstack(d3189254-8cff-481a-92f3-466a928de54e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.143753 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-6vxgb" podUID="d3189254-8cff-481a-92f3-466a928de54e" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.633330 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-6vxgb" podUID="d3189254-8cff-481a-92f3-466a928de54e" Dec 09 17:16:07 crc kubenswrapper[4840]: E1209 17:16:07.633505 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"loki-query-frontend\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7\\\"\"" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" podUID="7838bb86-5c5f-4100-aa6c-442e1e591645" Dec 09 
17:16:08 crc kubenswrapper[4840]: E1209 17:16:08.235615 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified" Dec 09 17:16:08 crc kubenswrapper[4840]: E1209 17:16:08.236083 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-sb,Image:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b9hf9h5cdh557h656h674hbbh65hfch87h595h5cchfdh5b7h599hdfh54h595hbch55h5fch545h649hd6h5bh684h64dhc6h675hd6h649h667q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-sb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dztpb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-sb-0_openstack(a4b9253d-0e13-4dd3-8b9a-7428281a743d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.330023 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.330434 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:08 crc kubenswrapper[4840]: E1209 17:16:08.330658 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:08 crc kubenswrapper[4840]: E1209 17:16:08.330696 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:08 crc kubenswrapper[4840]: E1209 17:16:08.330777 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:12.330751613 +0000 UTC m=+1158.321862276 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.432010 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc\") pod \"bae6b28b-2f89-44c6-b196-145e099e94e5\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.432246 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config\") pod \"bae6b28b-2f89-44c6-b196-145e099e94e5\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.432275 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x96jx\" (UniqueName: \"kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx\") pod \"bae6b28b-2f89-44c6-b196-145e099e94e5\" (UID: \"bae6b28b-2f89-44c6-b196-145e099e94e5\") " Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.432561 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bae6b28b-2f89-44c6-b196-145e099e94e5" (UID: "bae6b28b-2f89-44c6-b196-145e099e94e5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.432697 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.433142 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config" (OuterVolumeSpecName: "config") pod "bae6b28b-2f89-44c6-b196-145e099e94e5" (UID: "bae6b28b-2f89-44c6-b196-145e099e94e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.435759 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx" (OuterVolumeSpecName: "kube-api-access-x96jx") pod "bae6b28b-2f89-44c6-b196-145e099e94e5" (UID: "bae6b28b-2f89-44c6-b196-145e099e94e5"). InnerVolumeSpecName "kube-api-access-x96jx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.534652 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae6b28b-2f89-44c6-b196-145e099e94e5-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.534678 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x96jx\" (UniqueName: \"kubernetes.io/projected/bae6b28b-2f89-44c6-b196-145e099e94e5-kube-api-access-x96jx\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.639953 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" event={"ID":"bae6b28b-2f89-44c6-b196-145e099e94e5","Type":"ContainerDied","Data":"53501e3741eb5a9c69cb179a434b15d6a0a19951f09c462b5ab387c4978edd7d"} Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.640090 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-62c2p" Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.685344 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:16:08 crc kubenswrapper[4840]: I1209 17:16:08.696243 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-62c2p"] Dec 09 17:16:09 crc kubenswrapper[4840]: I1209 17:16:09.648007 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:09 crc kubenswrapper[4840]: I1209 17:16:09.662270 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"71339989-bd1f-4da4-8976-62fbb767a30e","Type":"ContainerStarted","Data":"07b0138bd552042c873396fee064f91dba2900d76120bb084cd8bc315d7d6c22"} Dec 09 17:16:09 crc kubenswrapper[4840]: I1209 17:16:09.728809 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-2xsrv"] Dec 09 17:16:09 crc kubenswrapper[4840]: W1209 17:16:09.801408 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b4945f7_403c_4d4a_aa3c_882776dc0240.slice/crio-33e4a66b692d92336c71dece3f9d71d3bd7b5c183d6cb50deea20c85da001841 WatchSource:0}: Error finding container 33e4a66b692d92336c71dece3f9d71d3bd7b5c183d6cb50deea20c85da001841: Status 404 returned error can't find the container with id 33e4a66b692d92336c71dece3f9d71d3bd7b5c183d6cb50deea20c85da001841 Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.626162 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bae6b28b-2f89-44c6-b196-145e099e94e5" path="/var/lib/kubelet/pods/bae6b28b-2f89-44c6-b196-145e099e94e5/volumes" Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.671911 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2xsrv" event={"ID":"0e3eae38-0d51-4c6c-9258-41a7699cb1f1","Type":"ContainerStarted","Data":"484ab7db35be5d0a568b9ab658b757b3f7d5ddefba77dbd5bf95cd65263341ea"} Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.674482 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerStarted","Data":"33e4a66b692d92336c71dece3f9d71d3bd7b5c183d6cb50deea20c85da001841"} Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.677546 4840 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" event={"ID":"be53cf19-ee08-4a03-96d8-5899cd1f59ec","Type":"ContainerStarted","Data":"a90d54266558cb0906cb75755b55061973c423c6ef5ea2922ab143d8de1377dd"} Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.677588 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:16:10 crc kubenswrapper[4840]: I1209 17:16:10.709260 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" podStartSLOduration=13.608327876 podStartE2EDuration="27.709241361s" podCreationTimestamp="2025-12-09 17:15:43 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.962606748 +0000 UTC m=+1140.953717381" lastFinishedPulling="2025-12-09 17:16:09.063520233 +0000 UTC m=+1155.054630866" observedRunningTime="2025-12-09 17:16:10.69703711 +0000 UTC m=+1156.688147743" watchObservedRunningTime="2025-12-09 17:16:10.709241361 +0000 UTC m=+1156.700351994" Dec 09 17:16:11 crc kubenswrapper[4840]: I1209 17:16:11.695923 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerStarted","Data":"5244fe0fbc7b7d1f4ccadb212c2501526ed510a64f3173064b1051ad0057d3ec"} Dec 09 17:16:12 crc kubenswrapper[4840]: I1209 17:16:12.424946 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:12 crc kubenswrapper[4840]: E1209 17:16:12.425303 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:12 crc kubenswrapper[4840]: E1209 17:16:12.425346 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:12 crc kubenswrapper[4840]: E1209 17:16:12.425440 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:20.425409648 +0000 UTC m=+1166.416520331 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:12 crc kubenswrapper[4840]: I1209 17:16:12.720132 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" event={"ID":"4cf23999-5210-4106-aa05-9ac1c07da2a1","Type":"ContainerStarted","Data":"44bf560b87fdf71b058d394f2ba48dceb77a8d0f1184eb607fa6b3c5e36b153b"} Dec 09 17:16:12 crc kubenswrapper[4840]: I1209 17:16:12.721591 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:16:12 crc kubenswrapper[4840]: I1209 17:16:12.729626 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"694a7e34-d1ce-4a1b-8475-fbb5d250b955","Type":"ContainerStarted","Data":"db5ec3ff388adefbe0647fb87cbe825a643bbfa0adfd5f21a70d78791cc508ec"} Dec 09 17:16:12 crc kubenswrapper[4840]: I1209 17:16:12.748643 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" podStartSLOduration=14.414283449 podStartE2EDuration="28.748625836s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.950311344 +0000 UTC m=+1140.941421977" lastFinishedPulling="2025-12-09 17:16:09.284653731 +0000 UTC m=+1155.275764364" observedRunningTime="2025-12-09 17:16:12.737731372 +0000 UTC m=+1158.728841995" watchObservedRunningTime="2025-12-09 17:16:12.748625836 +0000 UTC m=+1158.739736469" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.739065 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8","Type":"ContainerStarted","Data":"ad2d95f3afa8367d07b0402b04921c83afc7046c18af50a9bd6109deec7fba59"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.742054 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-index-gateway-0" event={"ID":"374f81b4-2b45-4e8a-9b41-898b64e5623f","Type":"ContainerStarted","Data":"c6adbf37f5d8d6922b452b37493fdf871a13a0937797b63994db9c39f85f6df6"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.742642 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.756202 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerStarted","Data":"e19f435ec770ef6f77f4e6feb3184c1dc91a4702ef0802ef0f115f82f4786f70"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.759003 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" event={"ID":"4e1e443c-6d35-4788-8cd7-dae8911ffc1e","Type":"ContainerStarted","Data":"258ea13fc38c35054f3d5229e7d3ed34b1859a1d7fb81c98991b5773b224d254"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.759166 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.762996 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerStarted","Data":"bec60d7aeedceafb53803205ef79d83eae1ec02527856d1bc99add536c2e08c8"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.764982 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerStarted","Data":"afb71603b0ba18d74c753fca2e57a6d6de33138f63d3e35631dbb068394ac1bb"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.768131 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-compactor-0" event={"ID":"3049524c-ff2b-4c18-baf0-c15c182583cc","Type":"ContainerStarted","Data":"f693c18784c7f26160557594f313b3e103cbb1ef07c9d21eba72c99fc32be0fb"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.768168 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.771655 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-v78xq" event={"ID":"637ab881-6952-409f-8e9d-619aaf72fb51","Type":"ContainerStarted","Data":"69fb27fb42ec262e5b1c405c4b5aa261514f789bd792cfa3e08eff4b1560d9e5"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.771796 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-v78xq" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.773076 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" event={"ID":"64550645-76ad-4518-ad64-74d530e0a4f1","Type":"ContainerStarted","Data":"586777c735203d277e9a94ff64c107bf828df91c58faf6e0de52989f51fcd02f"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.773752 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.775193 4840 generic.go:334] "Generic (PLEG): container finished" podID="71339989-bd1f-4da4-8976-62fbb767a30e" containerID="07b0138bd552042c873396fee064f91dba2900d76120bb084cd8bc315d7d6c22" exitCode=0 Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.775238 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"71339989-bd1f-4da4-8976-62fbb767a30e","Type":"ContainerDied","Data":"07b0138bd552042c873396fee064f91dba2900d76120bb084cd8bc315d7d6c22"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.777253 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.779149 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7b1009e9-391e-4a13-8d90-f55fb6c3b329","Type":"ContainerStarted","Data":"37d1de15b2b4ac31cd2976ac4a76ff9616c683519802ce798e511e792d22000f"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.780834 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-ingester-0" event={"ID":"b45f4212-4ee0-4679-b115-d8d231bf946d","Type":"ContainerStarted","Data":"7dc5379d264dec2e292385db800a6bea626487a0c3b1e3dd255a8ea21611c185"} Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.797859 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cloudkitty-lokistack-gateway-bc75944f-b9cg8" podStartSLOduration=15.103778642 podStartE2EDuration="29.797840047s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.385924922 +0000 UTC m=+1140.377035555" lastFinishedPulling="2025-12-09 17:16:09.079986337 +0000 UTC m=+1155.071096960" observedRunningTime="2025-12-09 17:16:13.793743089 +0000 UTC m=+1159.784853722" watchObservedRunningTime="2025-12-09 17:16:13.797840047 +0000 UTC m=+1159.788950680" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.800427 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.821063 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-index-gateway-0" podStartSLOduration=15.546683895 podStartE2EDuration="29.821046285s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.923522672 +0000 UTC m=+1140.914633305" lastFinishedPulling="2025-12-09 17:16:09.197885062 +0000 UTC m=+1155.188995695" observedRunningTime="2025-12-09 17:16:13.820683315 +0000 UTC m=+1159.811793948" watchObservedRunningTime="2025-12-09 17:16:13.821046285 +0000 UTC m=+1159.812156918" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.847020 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-compactor-0" podStartSLOduration=15.533092184000001 podStartE2EDuration="29.847003283s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.883975433 +0000 UTC m=+1140.875086066" lastFinishedPulling="2025-12-09 17:16:09.197886532 +0000 UTC m=+1155.188997165" observedRunningTime="2025-12-09 17:16:13.842167524 +0000 UTC m=+1159.833278177" watchObservedRunningTime="2025-12-09 17:16:13.847003283 +0000 UTC m=+1159.838113916" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.886077 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-fj2hp" podStartSLOduration=14.291317425999999 podStartE2EDuration="29.886057187s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:53.603160092 +0000 UTC m=+1139.594270725" lastFinishedPulling="2025-12-09 17:16:09.197899853 +0000 UTC m=+1155.189010486" observedRunningTime="2025-12-09 17:16:13.881790104 +0000 UTC m=+1159.872900727" watchObservedRunningTime="2025-12-09 17:16:13.886057187 +0000 UTC m=+1159.877167820" Dec 09 17:16:13 crc kubenswrapper[4840]: I1209 17:16:13.960249 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-v78xq" podStartSLOduration=22.612553597 podStartE2EDuration="37.960227173s" podCreationTimestamp="2025-12-09 17:15:36 +0000 UTC" firstStartedPulling="2025-12-09 17:15:53.571543122 +0000 UTC m=+1139.562653755" lastFinishedPulling="2025-12-09 17:16:08.919216698 +0000 UTC m=+1154.910327331" observedRunningTime="2025-12-09 17:16:13.941733521 +0000 UTC m=+1159.932844154" watchObservedRunningTime="2025-12-09 17:16:13.960227173 +0000 UTC m=+1159.951337806" Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.020703 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-ingester-0" podStartSLOduration=15.746766977 podStartE2EDuration="30.020671024s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" 
firstStartedPulling="2025-12-09 17:15:54.924054407 +0000 UTC m=+1140.915165040" lastFinishedPulling="2025-12-09 17:16:09.197958454 +0000 UTC m=+1155.189069087" observedRunningTime="2025-12-09 17:16:14.011665834 +0000 UTC m=+1160.002776467" watchObservedRunningTime="2025-12-09 17:16:14.020671024 +0000 UTC m=+1160.011781657" Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.803167 4840 generic.go:334] "Generic (PLEG): container finished" podID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerID="e19f435ec770ef6f77f4e6feb3184c1dc91a4702ef0802ef0f115f82f4786f70" exitCode=0 Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.803337 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerDied","Data":"e19f435ec770ef6f77f4e6feb3184c1dc91a4702ef0802ef0f115f82f4786f70"} Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.806452 4840 generic.go:334] "Generic (PLEG): container finished" podID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerID="afb71603b0ba18d74c753fca2e57a6d6de33138f63d3e35631dbb068394ac1bb" exitCode=0 Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.806563 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerDied","Data":"afb71603b0ba18d74c753fca2e57a6d6de33138f63d3e35631dbb068394ac1bb"} Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.815406 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"13330488-1e93-4a88-8f15-331ee0b935cf","Type":"ContainerStarted","Data":"8c95a524b9ede1e17b6b9134c9b6fa5150668a10dbdf64d70f20c22e76020e1d"} Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.823504 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a4b9253d-0e13-4dd3-8b9a-7428281a743d","Type":"ContainerStarted","Data":"187c3e7d24ce70f528751e53af77e17df4f5d63bee4ff89de79f2ad394e49450"} Dec 09 17:16:14 crc kubenswrapper[4840]: I1209 17:16:14.825130 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 09 17:16:16 crc kubenswrapper[4840]: E1209 17:16:16.827899 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="a4b9253d-0e13-4dd3-8b9a-7428281a743d" Dec 09 17:16:16 crc kubenswrapper[4840]: E1209 17:16:16.842287 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="a4b9253d-0e13-4dd3-8b9a-7428281a743d" Dec 09 17:16:16 crc kubenswrapper[4840]: I1209 17:16:16.860646 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=26.803156555 podStartE2EDuration="44.860604489s" podCreationTimestamp="2025-12-09 17:15:32 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.400655076 +0000 UTC m=+1140.391765709" lastFinishedPulling="2025-12-09 17:16:12.458103 +0000 UTC m=+1158.449213643" observedRunningTime="2025-12-09 17:16:16.859478686 +0000 UTC m=+1162.850589319" 
watchObservedRunningTime="2025-12-09 17:16:16.860604489 +0000 UTC m=+1162.851715122" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.868877 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerStarted","Data":"eea028e75c56fb43ec96ac6605f01fe109ccfb27839ca43d134c9ef0ad890aa1"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.869301 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.871074 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7b1009e9-391e-4a13-8d90-f55fb6c3b329","Type":"ContainerStarted","Data":"3184eeb80d7835fdf48adf867c23f4fa07fe17d47f4b5277fe5ff1f6687e8d60"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.873809 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"71339989-bd1f-4da4-8976-62fbb767a30e","Type":"ContainerStarted","Data":"0c81105fb18222fef0cecc84ec0801e1165d4180fb86366b2b73e84589d22f5d"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.876414 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerStarted","Data":"6ced53f2a6ab8aa42e748049d5c3f58be5a34e64e573cb1cf56a73a0201ba395"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.876826 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.878621 4840 generic.go:334] "Generic (PLEG): container finished" podID="694a7e34-d1ce-4a1b-8475-fbb5d250b955" containerID="db5ec3ff388adefbe0647fb87cbe825a643bbfa0adfd5f21a70d78791cc508ec" exitCode=0 Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.878667 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"694a7e34-d1ce-4a1b-8475-fbb5d250b955","Type":"ContainerDied","Data":"db5ec3ff388adefbe0647fb87cbe825a643bbfa0adfd5f21a70d78791cc508ec"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.881520 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2xsrv" event={"ID":"0e3eae38-0d51-4c6c-9258-41a7699cb1f1","Type":"ContainerStarted","Data":"9d66ba2c89d32ad0fdb1692d75ab578eee052d0f10108bd0fbea8e77a706c04c"} Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.885782 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" podStartSLOduration=15.885763523 podStartE2EDuration="15.885763523s" podCreationTimestamp="2025-12-09 17:16:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:18.883833457 +0000 UTC m=+1164.874944090" watchObservedRunningTime="2025-12-09 17:16:18.885763523 +0000 UTC m=+1164.876874156" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.925388 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-2xsrv" podStartSLOduration=5.429133856 podStartE2EDuration="13.925371413s" podCreationTimestamp="2025-12-09 17:16:05 +0000 UTC" firstStartedPulling="2025-12-09 17:16:09.807117915 +0000 UTC m=+1155.798228548" lastFinishedPulling="2025-12-09 
17:16:18.303355472 +0000 UTC m=+1164.294466105" observedRunningTime="2025-12-09 17:16:18.919980018 +0000 UTC m=+1164.911090641" watchObservedRunningTime="2025-12-09 17:16:18.925371413 +0000 UTC m=+1164.916482046" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.941673 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-59s29" podStartSLOduration=11.226660589 podStartE2EDuration="52.941651972s" podCreationTimestamp="2025-12-09 17:15:26 +0000 UTC" firstStartedPulling="2025-12-09 17:15:27.569317748 +0000 UTC m=+1113.560428391" lastFinishedPulling="2025-12-09 17:16:09.284309131 +0000 UTC m=+1155.275419774" observedRunningTime="2025-12-09 17:16:18.934781284 +0000 UTC m=+1164.925891917" watchObservedRunningTime="2025-12-09 17:16:18.941651972 +0000 UTC m=+1164.932762605" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.959434 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=34.43453118 podStartE2EDuration="50.959418903s" podCreationTimestamp="2025-12-09 17:15:28 +0000 UTC" firstStartedPulling="2025-12-09 17:15:52.020428258 +0000 UTC m=+1138.011538891" lastFinishedPulling="2025-12-09 17:16:08.545315981 +0000 UTC m=+1154.536426614" observedRunningTime="2025-12-09 17:16:18.951701701 +0000 UTC m=+1164.942812334" watchObservedRunningTime="2025-12-09 17:16:18.959418903 +0000 UTC m=+1164.950529536" Dec 09 17:16:18 crc kubenswrapper[4840]: I1209 17:16:18.976037 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=29.780083859 podStartE2EDuration="43.976012081s" podCreationTimestamp="2025-12-09 17:15:35 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.9811045 +0000 UTC m=+1140.972215133" lastFinishedPulling="2025-12-09 17:16:09.177032722 +0000 UTC m=+1155.168143355" observedRunningTime="2025-12-09 17:16:18.971559593 +0000 UTC m=+1164.962670236" watchObservedRunningTime="2025-12-09 17:16:18.976012081 +0000 UTC m=+1164.967122714" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.279952 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.315737 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.388078 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.388128 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.894864 4840 generic.go:334] "Generic (PLEG): container finished" podID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerID="bec60d7aeedceafb53803205ef79d83eae1ec02527856d1bc99add536c2e08c8" exitCode=0 Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.894945 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerDied","Data":"bec60d7aeedceafb53803205ef79d83eae1ec02527856d1bc99add536c2e08c8"} Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.899177 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" 
event={"ID":"7838bb86-5c5f-4100-aa6c-442e1e591645","Type":"ContainerStarted","Data":"30af26444cae11ee3407cc6d0d11079794a95042d548cebdf2adc531fbc08938"} Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.899862 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.949451 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" podStartSLOduration=-9223372000.905346 podStartE2EDuration="35.949429571s" podCreationTimestamp="2025-12-09 17:15:44 +0000 UTC" firstStartedPulling="2025-12-09 17:15:52.021089667 +0000 UTC m=+1138.012200300" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:19.947609109 +0000 UTC m=+1165.938719752" watchObservedRunningTime="2025-12-09 17:16:19.949429571 +0000 UTC m=+1165.940540214" Dec 09 17:16:19 crc kubenswrapper[4840]: I1209 17:16:19.957579 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.236529 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.262699 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.268921 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.272844 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-66bsn"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.274156 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.278116 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.278237 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.286367 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.301356 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-66bsn"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.387893 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb8eb8de-7e32-4535-9015-394a0621e5a7-config\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.388213 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.388531 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389329 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389419 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq6rt\" (UniqueName: \"kubernetes.io/projected/cb8eb8de-7e32-4535-9015-394a0621e5a7-kube-api-access-qq6rt\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389566 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovs-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389658 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8km5\" (UniqueName: \"kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " 
pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389749 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovn-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.389871 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.390022 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-combined-ca-bundle\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491750 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491815 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491836 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq6rt\" (UniqueName: \"kubernetes.io/projected/cb8eb8de-7e32-4535-9015-394a0621e5a7-kube-api-access-qq6rt\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491869 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovs-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491883 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8km5\" (UniqueName: \"kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491900 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovn-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " 
pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491930 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.491965 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-combined-ca-bundle\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.492006 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb8eb8de-7e32-4535-9015-394a0621e5a7-config\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.492019 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.492064 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.492898 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: E1209 17:16:20.493019 4840 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 17:16:20 crc kubenswrapper[4840]: E1209 17:16:20.493036 4840 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 17:16:20 crc kubenswrapper[4840]: E1209 17:16:20.493070 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift podName:fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534 nodeName:}" failed. No retries permitted until 2025-12-09 17:16:36.493058905 +0000 UTC m=+1182.484169538 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift") pod "swift-storage-0" (UID: "fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534") : configmap "swift-ring-files" not found Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.493880 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.494275 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovs-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.494349 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cb8eb8de-7e32-4535-9015-394a0621e5a7-ovn-rundir\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.494904 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.495728 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb8eb8de-7e32-4535-9015-394a0621e5a7-config\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.536692 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.537743 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb8eb8de-7e32-4535-9015-394a0621e5a7-combined-ca-bundle\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.539063 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq6rt\" (UniqueName: \"kubernetes.io/projected/cb8eb8de-7e32-4535-9015-394a0621e5a7-kube-api-access-qq6rt\") pod \"ovn-controller-metrics-66bsn\" (UID: \"cb8eb8de-7e32-4535-9015-394a0621e5a7\") " pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.544753 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8km5\" (UniqueName: \"kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5\") pod 
\"dnsmasq-dns-57d65f699f-r5r2w\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.613394 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.630982 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-66bsn" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.653602 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.678747 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.683610 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.685403 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.704802 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.797354 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.797404 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.797429 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.797486 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqk98\" (UniqueName: \"kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.797539 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.898760 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.898807 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.898829 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.898889 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqk98\" (UniqueName: \"kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.898953 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.899851 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.900006 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.900178 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.901260 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.916779 4840 generic.go:334] "Generic (PLEG): container finished" podID="e56689ef-4c1c-4775-9740-3e1ec3a0f4e8" containerID="ad2d95f3afa8367d07b0402b04921c83afc7046c18af50a9bd6109deec7fba59" exitCode=0 Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.916922 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8","Type":"ContainerDied","Data":"ad2d95f3afa8367d07b0402b04921c83afc7046c18af50a9bd6109deec7fba59"} Dec 09 17:16:20 crc kubenswrapper[4840]: I1209 17:16:20.941160 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqk98\" (UniqueName: \"kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98\") pod \"dnsmasq-dns-b8fbc5445-lqngs\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.054214 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.132253 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:21 crc kubenswrapper[4840]: W1209 17:16:21.151795 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc428deb_2794_47e6_879d_fa0b15023a60.slice/crio-ea0d6b74828bd02d57a6458f3ea434c1fe3d0b869b030ea4e27385e967d7f8de WatchSource:0}: Error finding container ea0d6b74828bd02d57a6458f3ea434c1fe3d0b869b030ea4e27385e967d7f8de: Status 404 returned error can't find the container with id ea0d6b74828bd02d57a6458f3ea434c1fe3d0b869b030ea4e27385e967d7f8de Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.272701 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-66bsn"] Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.771232 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.928765 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-66bsn" event={"ID":"cb8eb8de-7e32-4535-9015-394a0621e5a7","Type":"ContainerStarted","Data":"dbb04b28e93b5e0fd3507bcde8d126501896fe49d0e2bfa9711598a18be647ce"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.928812 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-66bsn" event={"ID":"cb8eb8de-7e32-4535-9015-394a0621e5a7","Type":"ContainerStarted","Data":"a750833badb9900e03ffd136bb7f1dfd41c371f92d1f9e3d5467a1b3dccea72d"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.931060 4840 generic.go:334] "Generic (PLEG): container finished" podID="dc428deb-2794-47e6-879d-fa0b15023a60" containerID="470176f56f403bc20f93e949c983fc57dd1b894d1ed79c8b7f1f4c89ce2b230c" exitCode=0 Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.931120 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" event={"ID":"dc428deb-2794-47e6-879d-fa0b15023a60","Type":"ContainerDied","Data":"470176f56f403bc20f93e949c983fc57dd1b894d1ed79c8b7f1f4c89ce2b230c"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.931138 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" event={"ID":"dc428deb-2794-47e6-879d-fa0b15023a60","Type":"ContainerStarted","Data":"ea0d6b74828bd02d57a6458f3ea434c1fe3d0b869b030ea4e27385e967d7f8de"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.937304 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"e56689ef-4c1c-4775-9740-3e1ec3a0f4e8","Type":"ContainerStarted","Data":"21d1a1b888ee17d0c65312cd15d347e380f4326b323856daf1719b4f20ed03d7"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.938617 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" event={"ID":"97461318-407b-4b1d-b565-b0fcdd35a60d","Type":"ContainerStarted","Data":"f8d110bf9a0d63cd3686db03ead4ce11718a9c2e818be7f75ea0a671714171c3"} Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.938668 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="dnsmasq-dns" containerID="cri-o://eea028e75c56fb43ec96ac6605f01fe109ccfb27839ca43d134c9ef0ad890aa1" gracePeriod=10 Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.940613 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-59s29" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="dnsmasq-dns" containerID="cri-o://6ced53f2a6ab8aa42e748049d5c3f58be5a34e64e573cb1cf56a73a0201ba395" gracePeriod=10 Dec 09 17:16:21 crc kubenswrapper[4840]: I1209 17:16:21.961645 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-66bsn" podStartSLOduration=1.961618742 podStartE2EDuration="1.961618742s" podCreationTimestamp="2025-12-09 17:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:21.960348025 +0000 UTC m=+1167.951458668" watchObservedRunningTime="2025-12-09 17:16:21.961618742 +0000 UTC m=+1167.952729375" Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.032459 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=34.408458874 podStartE2EDuration="53.032444661s" podCreationTimestamp="2025-12-09 17:15:29 +0000 UTC" firstStartedPulling="2025-12-09 17:15:50.444308493 +0000 UTC m=+1136.435419126" lastFinishedPulling="2025-12-09 17:16:09.06829428 +0000 UTC m=+1155.059404913" observedRunningTime="2025-12-09 17:16:22.029147596 +0000 UTC m=+1168.020258229" watchObservedRunningTime="2025-12-09 17:16:22.032444661 +0000 UTC m=+1168.023555294" Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.948331 4840 generic.go:334] "Generic (PLEG): container finished" podID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerID="eea028e75c56fb43ec96ac6605f01fe109ccfb27839ca43d134c9ef0ad890aa1" exitCode=0 Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.948413 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerDied","Data":"eea028e75c56fb43ec96ac6605f01fe109ccfb27839ca43d134c9ef0ad890aa1"} Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.955251 4840 generic.go:334] "Generic (PLEG): container finished" podID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerID="6ced53f2a6ab8aa42e748049d5c3f58be5a34e64e573cb1cf56a73a0201ba395" exitCode=0 Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.955298 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerDied","Data":"6ced53f2a6ab8aa42e748049d5c3f58be5a34e64e573cb1cf56a73a0201ba395"} Dec 09 17:16:22 crc kubenswrapper[4840]: 
I1209 17:16:22.956841 4840 generic.go:334] "Generic (PLEG): container finished" podID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerID="60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e" exitCode=0 Dec 09 17:16:22 crc kubenswrapper[4840]: I1209 17:16:22.957108 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" event={"ID":"97461318-407b-4b1d-b565-b0fcdd35a60d","Type":"ContainerDied","Data":"60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e"} Dec 09 17:16:23 crc kubenswrapper[4840]: I1209 17:16:23.254148 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 09 17:16:23 crc kubenswrapper[4840]: I1209 17:16:23.266721 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 09 17:16:23 crc kubenswrapper[4840]: I1209 17:16:23.702161 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 09 17:16:23 crc kubenswrapper[4840]: I1209 17:16:23.971924 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.219532 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-mtfrn" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.562608 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.566020 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.619304 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd9cr\" (UniqueName: \"kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr\") pod \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.619394 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc\") pod \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.619523 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config\") pod \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\" (UID: \"956f73a1-e1d1-4cc8-aa72-d9be637417b1\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.623465 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.636646 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr" (OuterVolumeSpecName: "kube-api-access-dd9cr") pod "956f73a1-e1d1-4cc8-aa72-d9be637417b1" (UID: "956f73a1-e1d1-4cc8-aa72-d9be637417b1"). InnerVolumeSpecName "kube-api-access-dd9cr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.718026 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "956f73a1-e1d1-4cc8-aa72-d9be637417b1" (UID: "956f73a1-e1d1-4cc8-aa72-d9be637417b1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.722021 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc\") pod \"4b4945f7-403c-4d4a-aa3c-882776dc0240\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.722126 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config\") pod \"4b4945f7-403c-4d4a-aa3c-882776dc0240\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.722283 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4fw7\" (UniqueName: \"kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7\") pod \"4b4945f7-403c-4d4a-aa3c-882776dc0240\" (UID: \"4b4945f7-403c-4d4a-aa3c-882776dc0240\") " Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.722819 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dd9cr\" (UniqueName: \"kubernetes.io/projected/956f73a1-e1d1-4cc8-aa72-d9be637417b1-kube-api-access-dd9cr\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.722842 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.725715 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7" (OuterVolumeSpecName: "kube-api-access-b4fw7") pod "4b4945f7-403c-4d4a-aa3c-882776dc0240" (UID: "4b4945f7-403c-4d4a-aa3c-882776dc0240"). InnerVolumeSpecName "kube-api-access-b4fw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.769939 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config" (OuterVolumeSpecName: "config") pod "956f73a1-e1d1-4cc8-aa72-d9be637417b1" (UID: "956f73a1-e1d1-4cc8-aa72-d9be637417b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.781054 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config" (OuterVolumeSpecName: "config") pod "4b4945f7-403c-4d4a-aa3c-882776dc0240" (UID: "4b4945f7-403c-4d4a-aa3c-882776dc0240"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.796120 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4b4945f7-403c-4d4a-aa3c-882776dc0240" (UID: "4b4945f7-403c-4d4a-aa3c-882776dc0240"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.824600 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4fw7\" (UniqueName: \"kubernetes.io/projected/4b4945f7-403c-4d4a-aa3c-882776dc0240-kube-api-access-b4fw7\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.825019 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/956f73a1-e1d1-4cc8-aa72-d9be637417b1-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.825122 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.825203 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b4945f7-403c-4d4a-aa3c-882776dc0240-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:24 crc kubenswrapper[4840]: I1209 17:16:24.994676 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"694a7e34-d1ce-4a1b-8475-fbb5d250b955","Type":"ContainerStarted","Data":"f4be9b61e6baf579a640d9190c24ae61693f0524da2dd3edda0dd5733b97733e"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.003122 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" event={"ID":"97461318-407b-4b1d-b565-b0fcdd35a60d","Type":"ContainerStarted","Data":"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.003521 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.009025 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" event={"ID":"4b4945f7-403c-4d4a-aa3c-882776dc0240","Type":"ContainerDied","Data":"33e4a66b692d92336c71dece3f9d71d3bd7b5c183d6cb50deea20c85da001841"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.009210 4840 scope.go:117] "RemoveContainer" containerID="eea028e75c56fb43ec96ac6605f01fe109ccfb27839ca43d134c9ef0ad890aa1" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.009415 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.021310 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" event={"ID":"dc428deb-2794-47e6-879d-fa0b15023a60","Type":"ContainerStarted","Data":"31c7007edf7cdbbf02637963255fc7cfa65dbad30f589cfa485f37360e126325"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.021985 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.031102 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6vxgb" event={"ID":"d3189254-8cff-481a-92f3-466a928de54e","Type":"ContainerStarted","Data":"ed7a88c513275edca983731df0876c2c0c9a11024f523f50f6c587ee21310345"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.035665 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" podStartSLOduration=5.035648398 podStartE2EDuration="5.035648398s" podCreationTimestamp="2025-12-09 17:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:25.022296384 +0000 UTC m=+1171.013407017" watchObservedRunningTime="2025-12-09 17:16:25.035648398 +0000 UTC m=+1171.026759021" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.047296 4840 scope.go:117] "RemoveContainer" containerID="afb71603b0ba18d74c753fca2e57a6d6de33138f63d3e35631dbb068394ac1bb" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.049404 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-59s29" event={"ID":"956f73a1-e1d1-4cc8-aa72-d9be637417b1","Type":"ContainerDied","Data":"306397de722f4cb84c5630cc14a6236e78ee360389803cf903ad543d23f7f6d6"} Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.049518 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-59s29" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.056949 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" podStartSLOduration=5.056924921 podStartE2EDuration="5.056924921s" podCreationTimestamp="2025-12-09 17:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:25.047619253 +0000 UTC m=+1171.038729886" watchObservedRunningTime="2025-12-09 17:16:25.056924921 +0000 UTC m=+1171.048035554" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.086615 4840 scope.go:117] "RemoveContainer" containerID="6ced53f2a6ab8aa42e748049d5c3f58be5a34e64e573cb1cf56a73a0201ba395" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.109775 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.116265 4840 scope.go:117] "RemoveContainer" containerID="e19f435ec770ef6f77f4e6feb3184c1dc91a4702ef0802ef0f115f82f4786f70" Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.120677 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkfkl"] Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.140464 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:16:25 crc kubenswrapper[4840]: I1209 17:16:25.148202 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-59s29"] Dec 09 17:16:26 crc kubenswrapper[4840]: I1209 17:16:26.059865 4840 generic.go:334] "Generic (PLEG): container finished" podID="d3189254-8cff-481a-92f3-466a928de54e" containerID="ed7a88c513275edca983731df0876c2c0c9a11024f523f50f6c587ee21310345" exitCode=0 Dec 09 17:16:26 crc kubenswrapper[4840]: I1209 17:16:26.059926 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6vxgb" event={"ID":"d3189254-8cff-481a-92f3-466a928de54e","Type":"ContainerDied","Data":"ed7a88c513275edca983731df0876c2c0c9a11024f523f50f6c587ee21310345"} Dec 09 17:16:26 crc kubenswrapper[4840]: I1209 17:16:26.618846 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" path="/var/lib/kubelet/pods/4b4945f7-403c-4d4a-aa3c-882776dc0240/volumes" Dec 09 17:16:26 crc kubenswrapper[4840]: I1209 17:16:26.619669 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" path="/var/lib/kubelet/pods/956f73a1-e1d1-4cc8-aa72-d9be637417b1/volumes" Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.069323 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.076836 4840 generic.go:334] "Generic (PLEG): container finished" podID="0e3eae38-0d51-4c6c-9258-41a7699cb1f1" containerID="9d66ba2c89d32ad0fdb1692d75ab578eee052d0f10108bd0fbea8e77a706c04c" exitCode=0 Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.076900 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2xsrv" event={"ID":"0e3eae38-0d51-4c6c-9258-41a7699cb1f1","Type":"ContainerDied","Data":"9d66ba2c89d32ad0fdb1692d75ab578eee052d0f10108bd0fbea8e77a706c04c"} Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.081430 4840 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"694a7e34-d1ce-4a1b-8475-fbb5d250b955","Type":"ContainerStarted","Data":"3322ae8b14f7193f84b00f28147ba48289947a9c66e0b6a02ad5413bf8310c45"} Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.081660 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.086096 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Dec 09 17:16:27 crc kubenswrapper[4840]: I1209 17:16:27.131642 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=21.762942417 podStartE2EDuration="54.131627552s" podCreationTimestamp="2025-12-09 17:15:33 +0000 UTC" firstStartedPulling="2025-12-09 17:15:52.020484519 +0000 UTC m=+1138.011595152" lastFinishedPulling="2025-12-09 17:16:24.389169654 +0000 UTC m=+1170.380280287" observedRunningTime="2025-12-09 17:16:27.131276972 +0000 UTC m=+1173.122387605" watchObservedRunningTime="2025-12-09 17:16:27.131627552 +0000 UTC m=+1173.122738185" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.096937 4840 generic.go:334] "Generic (PLEG): container finished" podID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerID="2365d04c83aa2f9dd63e9cee92a15b62873fc69fa0193a589f63c05ec42bf785" exitCode=0 Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.097041 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerDied","Data":"2365d04c83aa2f9dd63e9cee92a15b62873fc69fa0193a589f63c05ec42bf785"} Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.571259 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.600932 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.600999 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.601028 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.601187 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc9p7\" (UniqueName: \"kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.601249 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.601277 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.601333 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf\") pod \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\" (UID: \"0e3eae38-0d51-4c6c-9258-41a7699cb1f1\") " Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.605169 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.605472 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.632475 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.636172 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7" (OuterVolumeSpecName: "kube-api-access-lc9p7") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "kube-api-access-lc9p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.682288 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7cb5889db5-bkfkl" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.707071 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc9p7\" (UniqueName: \"kubernetes.io/projected/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-kube-api-access-lc9p7\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.707107 4840 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.707119 4840 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.707132 4840 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.711672 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.733861 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.756452 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts" (OuterVolumeSpecName: "scripts") pod "0e3eae38-0d51-4c6c-9258-41a7699cb1f1" (UID: "0e3eae38-0d51-4c6c-9258-41a7699cb1f1"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.808848 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.808873 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:28 crc kubenswrapper[4840]: I1209 17:16:28.808882 4840 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0e3eae38-0d51-4c6c-9258-41a7699cb1f1-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.107923 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6vxgb" event={"ID":"d3189254-8cff-481a-92f3-466a928de54e","Type":"ContainerStarted","Data":"d3069aa01950dc6ff6fdaab818b47e1446e6c195f609a252b58d2fcfc0757b21"} Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.107984 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6vxgb" event={"ID":"d3189254-8cff-481a-92f3-466a928de54e","Type":"ContainerStarted","Data":"a33c169aa1007d1eb6b74c6791c794b7533d31ca5738eb5edfe8af13122c93cd"} Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.109041 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.109100 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6vxgb" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.110518 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerStarted","Data":"375bef9ed5aadebfc6614b42d1a27270b8543c203da38ae787c0c3b2315b2a78"} Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.111002 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.114304 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2xsrv" event={"ID":"0e3eae38-0d51-4c6c-9258-41a7699cb1f1","Type":"ContainerDied","Data":"484ab7db35be5d0a568b9ab658b757b3f7d5ddefba77dbd5bf95cd65263341ea"} Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.114350 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="484ab7db35be5d0a568b9ab658b757b3f7d5ddefba77dbd5bf95cd65263341ea" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.114360 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-2xsrv" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.117523 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerStarted","Data":"d49b34ac0e0ce74cc8312e488df73e9b24d6383702336af4a885df065a102656"} Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.159496 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-6vxgb" podStartSLOduration=22.37962466 podStartE2EDuration="53.159472684s" podCreationTimestamp="2025-12-09 17:15:36 +0000 UTC" firstStartedPulling="2025-12-09 17:15:53.617301749 +0000 UTC m=+1139.608412382" lastFinishedPulling="2025-12-09 17:16:24.397149763 +0000 UTC m=+1170.388260406" observedRunningTime="2025-12-09 17:16:29.151173865 +0000 UTC m=+1175.142284508" watchObservedRunningTime="2025-12-09 17:16:29.159472684 +0000 UTC m=+1175.150583317" Dec 09 17:16:29 crc kubenswrapper[4840]: I1209 17:16:29.190608 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=45.54410264 podStartE2EDuration="1m3.19058647s" podCreationTimestamp="2025-12-09 17:15:26 +0000 UTC" firstStartedPulling="2025-12-09 17:15:36.027217293 +0000 UTC m=+1122.018327926" lastFinishedPulling="2025-12-09 17:15:53.673701123 +0000 UTC m=+1139.664811756" observedRunningTime="2025-12-09 17:16:29.183501896 +0000 UTC m=+1175.174612539" watchObservedRunningTime="2025-12-09 17:16:29.19058647 +0000 UTC m=+1175.181697103" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.620581 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721036 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-lpswr"] Dec 09 17:16:30 crc kubenswrapper[4840]: E1209 17:16:30.721481 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="init" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721502 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="init" Dec 09 17:16:30 crc kubenswrapper[4840]: E1209 17:16:30.721518 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="init" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721527 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="init" Dec 09 17:16:30 crc kubenswrapper[4840]: E1209 17:16:30.721546 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e3eae38-0d51-4c6c-9258-41a7699cb1f1" containerName="swift-ring-rebalance" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721557 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e3eae38-0d51-4c6c-9258-41a7699cb1f1" containerName="swift-ring-rebalance" Dec 09 17:16:30 crc kubenswrapper[4840]: E1209 17:16:30.721598 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721607 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: 
E1209 17:16:30.721633 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721639 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721854 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e3eae38-0d51-4c6c-9258-41a7699cb1f1" containerName="swift-ring-rebalance" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721876 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4945f7-403c-4d4a-aa3c-882776dc0240" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.721892 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="956f73a1-e1d1-4cc8-aa72-d9be637417b1" containerName="dnsmasq-dns" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.722558 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.730672 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-2bce-account-create-update-slw4q"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.731947 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.736436 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.744772 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-lpswr"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.747364 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.747583 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.759774 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2bce-account-create-update-slw4q"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.771083 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts\") pod \"keystone-2bce-account-create-update-slw4q\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.771179 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjmbt\" (UniqueName: \"kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt\") pod \"keystone-2bce-account-create-update-slw4q\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.873052 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts\") pod \"keystone-2bce-account-create-update-slw4q\" 
(UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.873156 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjmbt\" (UniqueName: \"kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt\") pod \"keystone-2bce-account-create-update-slw4q\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.873315 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.873345 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdqp5\" (UniqueName: \"kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.873896 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts\") pod \"keystone-2bce-account-create-update-slw4q\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.886183 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-wzl7f"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.887331 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.940504 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wzl7f"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.975068 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.975136 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdqp5\" (UniqueName: \"kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.976164 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.988342 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c44f-account-create-update-rmd7r"] Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.989470 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.992456 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 09 17:16:30 crc kubenswrapper[4840]: I1209 17:16:30.996777 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c44f-account-create-update-rmd7r"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.056190 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.077151 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9q6p\" (UniqueName: \"kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.077280 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjmbt\" (UniqueName: \"kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt\") pod \"keystone-2bce-account-create-update-slw4q\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.077430 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc 
kubenswrapper[4840]: I1209 17:16:31.077778 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdqp5\" (UniqueName: \"kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5\") pod \"keystone-db-create-lpswr\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.122940 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.133363 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.133620 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="dnsmasq-dns" containerID="cri-o://31c7007edf7cdbbf02637963255fc7cfa65dbad30f589cfa485f37360e126325" gracePeriod=10 Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.178699 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9q6p\" (UniqueName: \"kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.179040 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.179077 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.179169 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb9vx\" (UniqueName: \"kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.179699 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.214581 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9q6p\" (UniqueName: \"kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p\") pod \"placement-db-create-wzl7f\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: 
I1209 17:16:31.216077 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.281197 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb9vx\" (UniqueName: \"kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.281384 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.282125 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.298987 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5lvq6"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.300247 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.333638 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb9vx\" (UniqueName: \"kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx\") pod \"placement-c44f-account-create-update-rmd7r\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.350636 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.351822 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5lvq6"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.368193 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.389323 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9npsj\" (UniqueName: \"kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.389428 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.422764 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-6847-account-create-update-674bs"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.439148 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.445242 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.458915 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6847-account-create-update-674bs"] Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.492912 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.493089 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9npsj\" (UniqueName: \"kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.494611 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.526251 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9npsj\" (UniqueName: \"kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj\") pod \"glance-db-create-5lvq6\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.557026 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.602976 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgr9h\" (UniqueName: \"kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.603017 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.622777 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.698650 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.704517 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgr9h\" (UniqueName: \"kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.704564 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.706147 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.753889 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgr9h\" (UniqueName: \"kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h\") pod \"glance-6847-account-create-update-674bs\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.886675 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:31 crc kubenswrapper[4840]: I1209 17:16:31.937140 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wzl7f"] Dec 09 17:16:32 crc kubenswrapper[4840]: W1209 17:16:32.120185 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9f3b116_8f98_4b31_8bf2_71f2c9dca16b.slice/crio-7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37 WatchSource:0}: Error finding container 7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37: Status 404 returned error can't find the container with id 7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37 Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.156359 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wzl7f" event={"ID":"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b","Type":"ContainerStarted","Data":"7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37"} Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.159272 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerStarted","Data":"99b80de4e52773d9dc5ef8a33fa6e1aa4c163d3f46ecb5d4fffab8195e3aca6b"} Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.161864 4840 generic.go:334] "Generic (PLEG): container finished" podID="dc428deb-2794-47e6-879d-fa0b15023a60" containerID="31c7007edf7cdbbf02637963255fc7cfa65dbad30f589cfa485f37360e126325" exitCode=0 Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.162276 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" event={"ID":"dc428deb-2794-47e6-879d-fa0b15023a60","Type":"ContainerDied","Data":"31c7007edf7cdbbf02637963255fc7cfa65dbad30f589cfa485f37360e126325"} Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.508433 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-lpswr"] Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.892744 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.935072 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb\") pod \"dc428deb-2794-47e6-879d-fa0b15023a60\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.935459 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc\") pod \"dc428deb-2794-47e6-879d-fa0b15023a60\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.935543 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8km5\" (UniqueName: \"kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5\") pod \"dc428deb-2794-47e6-879d-fa0b15023a60\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.935772 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config\") pod \"dc428deb-2794-47e6-879d-fa0b15023a60\" (UID: \"dc428deb-2794-47e6-879d-fa0b15023a60\") " Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.965185 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5" (OuterVolumeSpecName: "kube-api-access-t8km5") pod "dc428deb-2794-47e6-879d-fa0b15023a60" (UID: "dc428deb-2794-47e6-879d-fa0b15023a60"). InnerVolumeSpecName "kube-api-access-t8km5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.987352 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6847-account-create-update-674bs"] Dec 09 17:16:32 crc kubenswrapper[4840]: I1209 17:16:32.995425 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2bce-account-create-update-slw4q"] Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.001375 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config" (OuterVolumeSpecName: "config") pod "dc428deb-2794-47e6-879d-fa0b15023a60" (UID: "dc428deb-2794-47e6-879d-fa0b15023a60"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.006646 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c44f-account-create-update-rmd7r"] Dec 09 17:16:33 crc kubenswrapper[4840]: W1209 17:16:33.008796 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d4e3a74_889a_44eb_ac49_4db6cea8bcd8.slice/crio-ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb WatchSource:0}: Error finding container ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb: Status 404 returned error can't find the container with id ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.013500 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dc428deb-2794-47e6-879d-fa0b15023a60" (UID: "dc428deb-2794-47e6-879d-fa0b15023a60"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.017460 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dc428deb-2794-47e6-879d-fa0b15023a60" (UID: "dc428deb-2794-47e6-879d-fa0b15023a60"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.023255 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5lvq6"] Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.037293 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.037330 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.037343 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc428deb-2794-47e6-879d-fa0b15023a60-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.037356 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8km5\" (UniqueName: \"kubernetes.io/projected/dc428deb-2794-47e6-879d-fa0b15023a60-kube-api-access-t8km5\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.171071 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2bce-account-create-update-slw4q" event={"ID":"960e53e8-5055-4d17-a8d0-acecc475511e","Type":"ContainerStarted","Data":"a42f2d713fadebc5141feed487f6d0599f99b2179797d42d8290eca6c0e3a2fd"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.174450 4840 generic.go:334] "Generic (PLEG): container finished" podID="c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" containerID="d26be9469d2567bd01c76582b8578a055de89e2a3d65843684e3814bb57485d5" exitCode=0 Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.174501 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/placement-db-create-wzl7f" event={"ID":"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b","Type":"ContainerDied","Data":"d26be9469d2567bd01c76582b8578a055de89e2a3d65843684e3814bb57485d5"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.176571 4840 generic.go:334] "Generic (PLEG): container finished" podID="4140b4de-95a1-4ecc-bcb7-13252484a4be" containerID="3c26da42463373081077bad1a87737ef2711a65e44049d16660238c9eec290a9" exitCode=0 Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.176624 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lpswr" event={"ID":"4140b4de-95a1-4ecc-bcb7-13252484a4be","Type":"ContainerDied","Data":"3c26da42463373081077bad1a87737ef2711a65e44049d16660238c9eec290a9"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.176643 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lpswr" event={"ID":"4140b4de-95a1-4ecc-bcb7-13252484a4be","Type":"ContainerStarted","Data":"bf4367ebac510e1fd86d64e3cf5141b2d8f53684c340d254032faf39d85eec38"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.179411 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" event={"ID":"dc428deb-2794-47e6-879d-fa0b15023a60","Type":"ContainerDied","Data":"ea0d6b74828bd02d57a6458f3ea434c1fe3d0b869b030ea4e27385e967d7f8de"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.179447 4840 scope.go:117] "RemoveContainer" containerID="31c7007edf7cdbbf02637963255fc7cfa65dbad30f589cfa485f37360e126325" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.179559 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-r5r2w" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.183493 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5lvq6" event={"ID":"a767ff72-af0e-4bb1-b30d-7b760595f234","Type":"ContainerStarted","Data":"e0618ffe89124f732627f153c86206424ffc95e8b96fedc6c92c49d08ddb15fe"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.185286 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6847-account-create-update-674bs" event={"ID":"160479a3-c4ef-43bf-b98d-6c92fab32d26","Type":"ContainerStarted","Data":"5f14dca7f465d0329b70eb43f71f282e42febca2f6d7db3bada518f9c30564ba"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.203263 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c44f-account-create-update-rmd7r" event={"ID":"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8","Type":"ContainerStarted","Data":"ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb"} Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.236680 4840 scope.go:117] "RemoveContainer" containerID="470176f56f403bc20f93e949c983fc57dd1b894d1ed79c8b7f1f4c89ce2b230c" Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.274035 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:33 crc kubenswrapper[4840]: I1209 17:16:33.285313 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-r5r2w"] Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.214164 4840 generic.go:334] "Generic (PLEG): container finished" podID="a767ff72-af0e-4bb1-b30d-7b760595f234" containerID="80bcb9b2d4f4380e1d19072232e3cfab681bc6a7ebc98fa91e0e7162ba1cd1e5" exitCode=0 Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.214322 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5lvq6" event={"ID":"a767ff72-af0e-4bb1-b30d-7b760595f234","Type":"ContainerDied","Data":"80bcb9b2d4f4380e1d19072232e3cfab681bc6a7ebc98fa91e0e7162ba1cd1e5"} Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.216684 4840 generic.go:334] "Generic (PLEG): container finished" podID="160479a3-c4ef-43bf-b98d-6c92fab32d26" containerID="00f37ec0ca374e4e2afcaeac7f96f559b90706467a82277ff3b5c54d318595c2" exitCode=0 Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.216836 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6847-account-create-update-674bs" event={"ID":"160479a3-c4ef-43bf-b98d-6c92fab32d26","Type":"ContainerDied","Data":"00f37ec0ca374e4e2afcaeac7f96f559b90706467a82277ff3b5c54d318595c2"} Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.218188 4840 generic.go:334] "Generic (PLEG): container finished" podID="5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" containerID="05255f1811dfe227a56f4deeae2b47df22d0a542c1f8c7eb67bb1a60a6e3188a" exitCode=0 Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.218288 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c44f-account-create-update-rmd7r" event={"ID":"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8","Type":"ContainerDied","Data":"05255f1811dfe227a56f4deeae2b47df22d0a542c1f8c7eb67bb1a60a6e3188a"} Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.220267 4840 generic.go:334] "Generic (PLEG): container finished" podID="960e53e8-5055-4d17-a8d0-acecc475511e" containerID="554027641c60fbce63b1e7d768e9a313aa394ab2908707d448d943b46076d871" exitCode=0 Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.220314 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2bce-account-create-update-slw4q" event={"ID":"960e53e8-5055-4d17-a8d0-acecc475511e","Type":"ContainerDied","Data":"554027641c60fbce63b1e7d768e9a313aa394ab2908707d448d943b46076d871"} Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.223904 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a4b9253d-0e13-4dd3-8b9a-7428281a743d","Type":"ContainerStarted","Data":"4a8b8ff368f95f83a0074476e1d4301f3dc8517302d050fd819c8c587c73843d"} Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.306230 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=15.633121065 podStartE2EDuration="54.30503058s" podCreationTimestamp="2025-12-09 17:15:40 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.238362233 +0000 UTC m=+1140.229472866" lastFinishedPulling="2025-12-09 17:16:32.910271748 +0000 UTC m=+1178.901382381" observedRunningTime="2025-12-09 17:16:34.28833191 +0000 UTC m=+1180.279442543" watchObservedRunningTime="2025-12-09 17:16:34.30503058 +0000 UTC m=+1180.296141213" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.489129 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-xzc88" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.581739 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.664735 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" path="/var/lib/kubelet/pods/dc428deb-2794-47e6-879d-fa0b15023a60/volumes" Dec 
09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.774252 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.783829 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.874748 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts\") pod \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.874819 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts\") pod \"4140b4de-95a1-4ecc-bcb7-13252484a4be\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.874884 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdqp5\" (UniqueName: \"kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5\") pod \"4140b4de-95a1-4ecc-bcb7-13252484a4be\" (UID: \"4140b4de-95a1-4ecc-bcb7-13252484a4be\") " Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.874952 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9q6p\" (UniqueName: \"kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p\") pod \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\" (UID: \"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b\") " Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.875368 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" (UID: "c9f3b116-8f98-4b31-8bf2-71f2c9dca16b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.876035 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4140b4de-95a1-4ecc-bcb7-13252484a4be" (UID: "4140b4de-95a1-4ecc-bcb7-13252484a4be"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.887953 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p" (OuterVolumeSpecName: "kube-api-access-s9q6p") pod "c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" (UID: "c9f3b116-8f98-4b31-8bf2-71f2c9dca16b"). InnerVolumeSpecName "kube-api-access-s9q6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.890144 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5" (OuterVolumeSpecName: "kube-api-access-jdqp5") pod "4140b4de-95a1-4ecc-bcb7-13252484a4be" (UID: "4140b4de-95a1-4ecc-bcb7-13252484a4be"). 
InnerVolumeSpecName "kube-api-access-jdqp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.977720 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.977779 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4140b4de-95a1-4ecc-bcb7-13252484a4be-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.977794 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdqp5\" (UniqueName: \"kubernetes.io/projected/4140b4de-95a1-4ecc-bcb7-13252484a4be-kube-api-access-jdqp5\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:34 crc kubenswrapper[4840]: I1209 17:16:34.977807 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9q6p\" (UniqueName: \"kubernetes.io/projected/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b-kube-api-access-s9q6p\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.234322 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wzl7f" event={"ID":"c9f3b116-8f98-4b31-8bf2-71f2c9dca16b","Type":"ContainerDied","Data":"7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37"} Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.234369 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7461595b4c30fa9b11328ee5ddc4d1291a4dc0068c291a546fea731b438d6d37" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.234442 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wzl7f" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.244499 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-lpswr" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.244557 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-lpswr" event={"ID":"4140b4de-95a1-4ecc-bcb7-13252484a4be","Type":"ContainerDied","Data":"bf4367ebac510e1fd86d64e3cf5141b2d8f53684c340d254032faf39d85eec38"} Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.244579 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf4367ebac510e1fd86d64e3cf5141b2d8f53684c340d254032faf39d85eec38" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.528001 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" podUID="b45f4212-4ee0-4679-b115-d8d231bf946d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 17:16:35 crc kubenswrapper[4840]: I1209 17:16:35.646395 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 09 17:16:36 crc kubenswrapper[4840]: I1209 17:16:36.521186 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:36 crc kubenswrapper[4840]: I1209 17:16:36.529811 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534-etc-swift\") pod \"swift-storage-0\" (UID: \"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534\") " pod="openstack/swift-storage-0" Dec 09 17:16:36 crc kubenswrapper[4840]: I1209 17:16:36.643352 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.171951 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.302657 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.303273 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.942839 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.948320 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.969553 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:37 crc kubenswrapper[4840]: I1209 17:16:37.972580 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.052934 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb9vx\" (UniqueName: \"kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx\") pod \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.053009 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts\") pod \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\" (UID: \"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.053040 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgr9h\" (UniqueName: \"kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h\") pod \"160479a3-c4ef-43bf-b98d-6c92fab32d26\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.053169 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts\") pod \"160479a3-c4ef-43bf-b98d-6c92fab32d26\" (UID: \"160479a3-c4ef-43bf-b98d-6c92fab32d26\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.054857 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "160479a3-c4ef-43bf-b98d-6c92fab32d26" (UID: "160479a3-c4ef-43bf-b98d-6c92fab32d26"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.055304 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" (UID: "5d4e3a74-889a-44eb-ac49-4db6cea8bcd8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.059481 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx" (OuterVolumeSpecName: "kube-api-access-sb9vx") pod "5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" (UID: "5d4e3a74-889a-44eb-ac49-4db6cea8bcd8"). InnerVolumeSpecName "kube-api-access-sb9vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.059595 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h" (OuterVolumeSpecName: "kube-api-access-tgr9h") pod "160479a3-c4ef-43bf-b98d-6c92fab32d26" (UID: "160479a3-c4ef-43bf-b98d-6c92fab32d26"). InnerVolumeSpecName "kube-api-access-tgr9h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155025 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9npsj\" (UniqueName: \"kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj\") pod \"a767ff72-af0e-4bb1-b30d-7b760595f234\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155075 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts\") pod \"960e53e8-5055-4d17-a8d0-acecc475511e\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155195 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjmbt\" (UniqueName: \"kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt\") pod \"960e53e8-5055-4d17-a8d0-acecc475511e\" (UID: \"960e53e8-5055-4d17-a8d0-acecc475511e\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155238 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts\") pod \"a767ff72-af0e-4bb1-b30d-7b760595f234\" (UID: \"a767ff72-af0e-4bb1-b30d-7b760595f234\") " Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155555 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb9vx\" (UniqueName: \"kubernetes.io/projected/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-kube-api-access-sb9vx\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155570 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155579 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgr9h\" (UniqueName: \"kubernetes.io/projected/160479a3-c4ef-43bf-b98d-6c92fab32d26-kube-api-access-tgr9h\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155588 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/160479a3-c4ef-43bf-b98d-6c92fab32d26-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155581 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "960e53e8-5055-4d17-a8d0-acecc475511e" (UID: "960e53e8-5055-4d17-a8d0-acecc475511e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.155930 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a767ff72-af0e-4bb1-b30d-7b760595f234" (UID: "a767ff72-af0e-4bb1-b30d-7b760595f234"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.159234 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.160303 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt" (OuterVolumeSpecName: "kube-api-access-bjmbt") pod "960e53e8-5055-4d17-a8d0-acecc475511e" (UID: "960e53e8-5055-4d17-a8d0-acecc475511e"). InnerVolumeSpecName "kube-api-access-bjmbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.160894 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj" (OuterVolumeSpecName: "kube-api-access-9npsj") pod "a767ff72-af0e-4bb1-b30d-7b760595f234" (UID: "a767ff72-af0e-4bb1-b30d-7b760595f234"). InnerVolumeSpecName "kube-api-access-9npsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.257729 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a767ff72-af0e-4bb1-b30d-7b760595f234-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.257796 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9npsj\" (UniqueName: \"kubernetes.io/projected/a767ff72-af0e-4bb1-b30d-7b760595f234-kube-api-access-9npsj\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.257813 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960e53e8-5055-4d17-a8d0-acecc475511e-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.257854 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjmbt\" (UniqueName: \"kubernetes.io/projected/960e53e8-5055-4d17-a8d0-acecc475511e-kube-api-access-bjmbt\") on node \"crc\" DevicePath \"\"" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.266138 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.295292 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-2bce-account-create-update-slw4q" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.295626 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2bce-account-create-update-slw4q" event={"ID":"960e53e8-5055-4d17-a8d0-acecc475511e","Type":"ContainerDied","Data":"a42f2d713fadebc5141feed487f6d0599f99b2179797d42d8290eca6c0e3a2fd"} Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.295656 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a42f2d713fadebc5141feed487f6d0599f99b2179797d42d8290eca6c0e3a2fd" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.303365 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerStarted","Data":"ecda9951d590e5506700094ad4824525cf1564371b780555f9c7808f5d97d461"} Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.320261 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5lvq6" event={"ID":"a767ff72-af0e-4bb1-b30d-7b760595f234","Type":"ContainerDied","Data":"e0618ffe89124f732627f153c86206424ffc95e8b96fedc6c92c49d08ddb15fe"} Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.320304 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0618ffe89124f732627f153c86206424ffc95e8b96fedc6c92c49d08ddb15fe" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.320398 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5lvq6" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.323710 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6847-account-create-update-674bs" event={"ID":"160479a3-c4ef-43bf-b98d-6c92fab32d26","Type":"ContainerDied","Data":"5f14dca7f465d0329b70eb43f71f282e42febca2f6d7db3bada518f9c30564ba"} Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.323732 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6847-account-create-update-674bs" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.323747 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f14dca7f465d0329b70eb43f71f282e42febca2f6d7db3bada518f9c30564ba" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.326301 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-c44f-account-create-update-rmd7r" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.326358 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c44f-account-create-update-rmd7r" event={"ID":"5d4e3a74-889a-44eb-ac49-4db6cea8bcd8","Type":"ContainerDied","Data":"ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb"} Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.326393 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebde8232e8032f93e3b2d45fd9d62f9de90f0fcbc2af82ff9994bdc2c27c88fb" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.338145 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=22.279972336 podStartE2EDuration="1m5.338127724s" podCreationTimestamp="2025-12-09 17:15:33 +0000 UTC" firstStartedPulling="2025-12-09 17:15:54.981071379 +0000 UTC m=+1140.972182002" lastFinishedPulling="2025-12-09 17:16:38.039226757 +0000 UTC m=+1184.030337390" observedRunningTime="2025-12-09 17:16:38.332875642 +0000 UTC m=+1184.323986275" watchObservedRunningTime="2025-12-09 17:16:38.338127724 +0000 UTC m=+1184.329238347" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.445657 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446366 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446383 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446399 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="dnsmasq-dns" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446407 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="dnsmasq-dns" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446420 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a767ff72-af0e-4bb1-b30d-7b760595f234" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446426 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a767ff72-af0e-4bb1-b30d-7b760595f234" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446434 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4140b4de-95a1-4ecc-bcb7-13252484a4be" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446439 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4140b4de-95a1-4ecc-bcb7-13252484a4be" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446452 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446458 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446476 4840 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="160479a3-c4ef-43bf-b98d-6c92fab32d26" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446483 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="160479a3-c4ef-43bf-b98d-6c92fab32d26" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446495 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="init" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446501 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="init" Dec 09 17:16:38 crc kubenswrapper[4840]: E1209 17:16:38.446512 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960e53e8-5055-4d17-a8d0-acecc475511e" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446518 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="960e53e8-5055-4d17-a8d0-acecc475511e" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446718 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="960e53e8-5055-4d17-a8d0-acecc475511e" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446739 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446754 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a767ff72-af0e-4bb1-b30d-7b760595f234" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446769 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc428deb-2794-47e6-879d-fa0b15023a60" containerName="dnsmasq-dns" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446796 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4140b4de-95a1-4ecc-bcb7-13252484a4be" containerName="mariadb-database-create" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446811 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="160479a3-c4ef-43bf-b98d-6c92fab32d26" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.446823 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" containerName="mariadb-account-create-update" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.448135 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.452176 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.453212 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.453481 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.453537 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sff8h" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.459371 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.562841 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-config\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.562912 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-scripts\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.562936 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.563145 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.563241 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.563287 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxpb7\" (UniqueName: \"kubernetes.io/projected/8730f858-5803-4a82-bf34-63a2b65ddebb-kube-api-access-pxpb7\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.563615 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: 
I1209 17:16:38.579218 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 09 17:16:38 crc kubenswrapper[4840]: W1209 17:16:38.590945 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa3b5d5b_eafc_4e13_93ab_6eaf7a40e534.slice/crio-18f5a93fa6831483e1f48f27cc44d4e7b5c7e73e5cbca13e4568a4ab7d73f283 WatchSource:0}: Error finding container 18f5a93fa6831483e1f48f27cc44d4e7b5c7e73e5cbca13e4568a4ab7d73f283: Status 404 returned error can't find the container with id 18f5a93fa6831483e1f48f27cc44d4e7b5c7e73e5cbca13e4568a4ab7d73f283 Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.665543 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.665819 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-config\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.665947 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-scripts\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666076 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666211 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666323 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666431 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxpb7\" (UniqueName: \"kubernetes.io/projected/8730f858-5803-4a82-bf34-63a2b65ddebb-kube-api-access-pxpb7\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666719 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-config\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666136 
4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.666755 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8730f858-5803-4a82-bf34-63a2b65ddebb-scripts\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.670553 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.670646 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.671303 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8730f858-5803-4a82-bf34-63a2b65ddebb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.685358 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxpb7\" (UniqueName: \"kubernetes.io/projected/8730f858-5803-4a82-bf34-63a2b65ddebb-kube-api-access-pxpb7\") pod \"ovn-northd-0\" (UID: \"8730f858-5803-4a82-bf34-63a2b65ddebb\") " pod="openstack/ovn-northd-0" Dec 09 17:16:38 crc kubenswrapper[4840]: I1209 17:16:38.767652 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 09 17:16:39 crc kubenswrapper[4840]: I1209 17:16:39.255569 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 09 17:16:39 crc kubenswrapper[4840]: W1209 17:16:39.257676 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8730f858_5803_4a82_bf34_63a2b65ddebb.slice/crio-fa7cea67460fab24541d07c6a3267094a213b5b5aa9c9895bf47ac010b17c7be WatchSource:0}: Error finding container fa7cea67460fab24541d07c6a3267094a213b5b5aa9c9895bf47ac010b17c7be: Status 404 returned error can't find the container with id fa7cea67460fab24541d07c6a3267094a213b5b5aa9c9895bf47ac010b17c7be Dec 09 17:16:39 crc kubenswrapper[4840]: I1209 17:16:39.335407 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"18f5a93fa6831483e1f48f27cc44d4e7b5c7e73e5cbca13e4568a4ab7d73f283"} Dec 09 17:16:39 crc kubenswrapper[4840]: I1209 17:16:39.336545 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8730f858-5803-4a82-bf34-63a2b65ddebb","Type":"ContainerStarted","Data":"fa7cea67460fab24541d07c6a3267094a213b5b5aa9c9895bf47ac010b17c7be"} Dec 09 17:16:39 crc kubenswrapper[4840]: I1209 17:16:39.581396 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.353136 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8730f858-5803-4a82-bf34-63a2b65ddebb","Type":"ContainerStarted","Data":"58c2427ff767cd3d3d745a5f0318e7692268a43716b99e5f04b192744e5f0cee"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.354639 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8730f858-5803-4a82-bf34-63a2b65ddebb","Type":"ContainerStarted","Data":"b3c5723439c54b52484da147388061384c1baa9fe410e5e95921142e07f40552"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.354713 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.355501 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"8edad4acc7bbc0725f8182f435365aa0e43a201d2dcf3506980bdfabf0a822f8"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.355541 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"7a08b44f7786e568d2c9a8f70a99e63859c1870f4fcff79010aec8c7fc3e4047"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.355550 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"d52025b4beb4761a846e3aee6da8b270c04c94cba3360a6bfe11211eda2bfa6d"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.355558 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"23d400d97a7f48e820eb6902778d24db3ff632f1fce5c5b2b004dee063b9588d"} Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.378344 4840 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.1346506339999998 podStartE2EDuration="3.378326116s" podCreationTimestamp="2025-12-09 17:16:38 +0000 UTC" firstStartedPulling="2025-12-09 17:16:39.272550129 +0000 UTC m=+1185.263660772" lastFinishedPulling="2025-12-09 17:16:40.516225621 +0000 UTC m=+1186.507336254" observedRunningTime="2025-12-09 17:16:41.377004078 +0000 UTC m=+1187.368114711" watchObservedRunningTime="2025-12-09 17:16:41.378326116 +0000 UTC m=+1187.369436749" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.568504 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-jnkjx"] Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.570132 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.576118 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-sjvvc" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.578005 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.580783 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jnkjx"] Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.627080 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6sh6\" (UniqueName: \"kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.627133 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.627171 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.627225 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.729005 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.729073 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.729114 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.729274 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6sh6\" (UniqueName: \"kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.733585 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.735755 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.738734 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.750300 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6sh6\" (UniqueName: \"kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6\") pod \"glance-db-sync-jnkjx\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") " pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:41 crc kubenswrapper[4840]: I1209 17:16:41.905170 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-jnkjx" Dec 09 17:16:42 crc kubenswrapper[4840]: I1209 17:16:42.484900 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jnkjx"] Dec 09 17:16:43 crc kubenswrapper[4840]: I1209 17:16:43.371882 4840 generic.go:334] "Generic (PLEG): container finished" podID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerID="5244fe0fbc7b7d1f4ccadb212c2501526ed510a64f3173064b1051ad0057d3ec" exitCode=0 Dec 09 17:16:43 crc kubenswrapper[4840]: I1209 17:16:43.372200 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerDied","Data":"5244fe0fbc7b7d1f4ccadb212c2501526ed510a64f3173064b1051ad0057d3ec"} Dec 09 17:16:43 crc kubenswrapper[4840]: I1209 17:16:43.376313 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jnkjx" event={"ID":"87c31a2f-f8da-4391-91b7-16544aceaf18","Type":"ContainerStarted","Data":"a2e843b771a8c53c52b993113aff139397c477ba2d0d570b5d5f4fc35a29b917"} Dec 09 17:16:43 crc kubenswrapper[4840]: I1209 17:16:43.388464 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"352fcdbaeb226e1710bc753a8b7bc04cc3e9f2961381bce1941bfcf9276f77f2"} Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.405059 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"9386703182f826c2761766f320a0ed3fd9325b1d62892841e440c24861952d3d"} Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.405362 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"adf7f9f5c1adfa0484ab764ff928312d65901f2c8019b27b461d6b5521562e55"} Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.405376 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"85e3da198732c0854386736087ef4c8e43edeac2e010ccc6b5377180121761bc"} Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.407661 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerStarted","Data":"186cb344f06d15427e74197e22e6db35af1f6bca882d7001d746a9cfabd7d7d0"} Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.407871 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 09 17:16:44 crc kubenswrapper[4840]: I1209 17:16:44.431888 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371958.422913 podStartE2EDuration="1m18.431862862s" podCreationTimestamp="2025-12-09 17:15:26 +0000 UTC" firstStartedPulling="2025-12-09 17:15:31.375132696 +0000 UTC m=+1117.366243329" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:16:44.431400359 +0000 UTC m=+1190.422511002" watchObservedRunningTime="2025-12-09 17:16:44.431862862 +0000 UTC m=+1190.422973495" Dec 09 17:16:45 crc kubenswrapper[4840]: I1209 17:16:45.518905 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" 
podUID="b45f4212-4ee0-4679-b115-d8d231bf946d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 17:16:46 crc kubenswrapper[4840]: I1209 17:16:46.434085 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"6f8d3cf5013f34ea0c66a7f9096d81e406c832f37f8265077a8c5e067840c211"} Dec 09 17:16:46 crc kubenswrapper[4840]: I1209 17:16:46.434396 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"95c9977197b80f7394bf224ede569fe17baa4db06f28a0901b56b85a278f6c90"} Dec 09 17:16:46 crc kubenswrapper[4840]: I1209 17:16:46.434409 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"a3353925ab4ae2a97ff1e36e9abe5939d91fe5828520c7c78bafc4390dbda445"} Dec 09 17:16:46 crc kubenswrapper[4840]: I1209 17:16:46.434418 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"12cdd36d6072a7ef918a32f8b7d3e47d7cb95de1975ae555aa79946ae552d98f"} Dec 09 17:16:46 crc kubenswrapper[4840]: I1209 17:16:46.434427 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"8718db1cd515873acbb96e328abfeb676cecf2dd0f5354943d254004511388c6"} Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.309462 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-v78xq" podUID="637ab881-6952-409f-8e9d-619aaf72fb51" containerName="ovn-controller" probeResult="failure" output=< Dec 09 17:16:47 crc kubenswrapper[4840]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 09 17:16:47 crc kubenswrapper[4840]: > Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.450040 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"ffab84ffd2d986f480f4fc49a8c1b1f55b16be37b53535493a1c1102e55497f2"} Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.450121 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534","Type":"ContainerStarted","Data":"bb00a90116a4fa7ffdc6b7f1526f08cef5f4690a88632c389579cc056b7ca16a"} Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.494299 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.839327615 podStartE2EDuration="44.494274684s" podCreationTimestamp="2025-12-09 17:16:03 +0000 UTC" firstStartedPulling="2025-12-09 17:16:38.59326279 +0000 UTC m=+1184.584373423" lastFinishedPulling="2025-12-09 17:16:45.248209859 +0000 UTC m=+1191.239320492" observedRunningTime="2025-12-09 17:16:47.489642861 +0000 UTC m=+1193.480753504" watchObservedRunningTime="2025-12-09 17:16:47.494274684 +0000 UTC m=+1193.485385317" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.772015 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"] Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.776699 4840 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.778401 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.809089 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"] Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873037 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873135 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873175 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873299 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xqnk\" (UniqueName: \"kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873382 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.873454 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.974779 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.974855 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc\") pod 
\"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.974897 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.974932 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.974953 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.975026 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xqnk\" (UniqueName: \"kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.975908 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.976026 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.976574 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.976599 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:47 crc kubenswrapper[4840]: I1209 17:16:47.978846 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " 
pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:48 crc kubenswrapper[4840]: I1209 17:16:48.005769 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xqnk\" (UniqueName: \"kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk\") pod \"dnsmasq-dns-6d5b6d6b67-vtjtl\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:48 crc kubenswrapper[4840]: I1209 17:16:48.101701 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:16:48 crc kubenswrapper[4840]: I1209 17:16:48.596911 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"] Dec 09 17:16:49 crc kubenswrapper[4840]: I1209 17:16:49.581677 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 09 17:16:49 crc kubenswrapper[4840]: I1209 17:16:49.586693 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 09 17:16:50 crc kubenswrapper[4840]: I1209 17:16:50.471864 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 09 17:16:52 crc kubenswrapper[4840]: I1209 17:16:52.323228 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-v78xq" podUID="637ab881-6952-409f-8e9d-619aaf72fb51" containerName="ovn-controller" probeResult="failure" output=< Dec 09 17:16:52 crc kubenswrapper[4840]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 09 17:16:52 crc kubenswrapper[4840]: > Dec 09 17:16:53 crc kubenswrapper[4840]: I1209 17:16:53.631324 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 17:16:53 crc kubenswrapper[4840]: I1209 17:16:53.631934 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus" containerID="cri-o://d49b34ac0e0ce74cc8312e488df73e9b24d6383702336af4a885df065a102656" gracePeriod=600 Dec 09 17:16:53 crc kubenswrapper[4840]: I1209 17:16:53.632462 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="thanos-sidecar" containerID="cri-o://ecda9951d590e5506700094ad4824525cf1564371b780555f9c7808f5d97d461" gracePeriod=600 Dec 09 17:16:53 crc kubenswrapper[4840]: I1209 17:16:53.632531 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="config-reloader" containerID="cri-o://99b80de4e52773d9dc5ef8a33fa6e1aa4c163d3f46ecb5d4fffab8195e3aca6b" gracePeriod=600 Dec 09 17:16:53 crc kubenswrapper[4840]: I1209 17:16:53.827532 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521266 4840 generic.go:334] "Generic (PLEG): container finished" podID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerID="ecda9951d590e5506700094ad4824525cf1564371b780555f9c7808f5d97d461" exitCode=0 Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521607 4840 generic.go:334] "Generic (PLEG): container 
finished" podID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerID="99b80de4e52773d9dc5ef8a33fa6e1aa4c163d3f46ecb5d4fffab8195e3aca6b" exitCode=0 Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521621 4840 generic.go:334] "Generic (PLEG): container finished" podID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerID="d49b34ac0e0ce74cc8312e488df73e9b24d6383702336af4a885df065a102656" exitCode=0 Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521332 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerDied","Data":"ecda9951d590e5506700094ad4824525cf1564371b780555f9c7808f5d97d461"} Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521664 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerDied","Data":"99b80de4e52773d9dc5ef8a33fa6e1aa4c163d3f46ecb5d4fffab8195e3aca6b"} Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.521684 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerDied","Data":"d49b34ac0e0ce74cc8312e488df73e9b24d6383702336af4a885df065a102656"} Dec 09 17:16:54 crc kubenswrapper[4840]: I1209 17:16:54.581154 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.114:9090/-/ready\": dial tcp 10.217.0.114:9090: connect: connection refused" Dec 09 17:16:55 crc kubenswrapper[4840]: I1209 17:16:55.518810 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" podUID="b45f4212-4ee0-4679-b115-d8d231bf946d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 17:16:57 crc kubenswrapper[4840]: I1209 17:16:57.318492 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-v78xq" podUID="637ab881-6952-409f-8e9d-619aaf72fb51" containerName="ovn-controller" probeResult="failure" output=< Dec 09 17:16:57 crc kubenswrapper[4840]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 09 17:16:57 crc kubenswrapper[4840]: > Dec 09 17:16:57 crc kubenswrapper[4840]: I1209 17:16:57.861236 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.297367 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-24c4-account-create-update-htjw8"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.299102 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.303475 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.313526 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6lkf2"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.315097 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.326805 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-24c4-account-create-update-htjw8"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.358785 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6lkf2"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.403307 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-1723-account-create-update-hmglv"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.408095 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.415314 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.416003 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1723-account-create-update-hmglv"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.440337 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.440380 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdrs5\" (UniqueName: \"kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.440410 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pxlp\" (UniqueName: \"kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.440544 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.442718 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-rwhks"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.443924 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.474720 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rwhks"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542468 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542523 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542546 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdrs5\" (UniqueName: \"kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542576 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-678gk\" (UniqueName: \"kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542597 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pxlp\" (UniqueName: \"kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542669 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542703 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts\") pod \"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.542734 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69l2z\" (UniqueName: \"kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z\") pod \"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 
17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.543328 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.543367 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.570817 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-create-g4td9"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.571115 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdrs5\" (UniqueName: \"kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5\") pod \"barbican-24c4-account-create-update-htjw8\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") " pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.572956 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.579199 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pxlp\" (UniqueName: \"kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp\") pod \"cinder-db-create-6lkf2\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") " pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.584283 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-create-g4td9"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.616768 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c4-account-create-update-htjw8" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.632673 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6lkf2" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.634463 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-sjx4x"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.635695 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.637990 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.638273 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ddftm" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.638532 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.638782 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.644104 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-678gk\" (UniqueName: \"kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.644213 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.644252 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts\") pod \"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.644288 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69l2z\" (UniqueName: \"kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z\") pod \"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.645617 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.646136 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts\") pod \"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.653910 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sjx4x"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.678938 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69l2z\" (UniqueName: \"kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z\") pod 
\"barbican-db-create-rwhks\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") " pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.683368 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-678gk\" (UniqueName: \"kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk\") pod \"cinder-1723-account-create-update-hmglv\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") " pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.697878 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5ce1-account-create-update-n5v6g"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.700500 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.702638 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.721074 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ce1-account-create-update-n5v6g"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.728559 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1723-account-create-update-hmglv" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.746055 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tgp9\" (UniqueName: \"kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.746133 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.746169 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq2f8\" (UniqueName: \"kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.746220 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.746267 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.764899 4840 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rwhks" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.786037 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-2tqf7"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.787145 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.801637 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-905b-account-create-update-xmh8g"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.802751 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.808299 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-db-secret" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.819002 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-905b-account-create-update-xmh8g"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.827521 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2tqf7"] Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848029 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848079 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848167 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tgp9\" (UniqueName: \"kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848201 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848232 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq2f8\" (UniqueName: \"kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848286 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgr9r\" (UniqueName: 
\"kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.848330 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.849254 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.864272 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.882702 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tgp9\" (UniqueName: \"kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.895448 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle\") pod \"keystone-db-sync-sjx4x\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.898606 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq2f8\" (UniqueName: \"kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8\") pod \"cloudkitty-db-create-g4td9\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") " pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949613 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82299\" (UniqueName: \"kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949701 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgr9r\" (UniqueName: \"kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949771 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949830 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949863 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpdfq\" (UniqueName: \"kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.949891 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.950712 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:58 crc kubenswrapper[4840]: I1209 17:16:58.993438 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgr9r\" (UniqueName: \"kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r\") pod \"neutron-5ce1-account-create-update-n5v6g\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") " pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.051816 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpdfq\" (UniqueName: \"kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.052161 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.052388 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82299\" (UniqueName: \"kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " 
pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.052564 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.053091 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.053264 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.058217 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-g4td9" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.065926 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.076182 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82299\" (UniqueName: \"kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299\") pod \"cloudkitty-905b-account-create-update-xmh8g\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") " pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.078677 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ce1-account-create-update-n5v6g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.082394 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpdfq\" (UniqueName: \"kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq\") pod \"neutron-db-create-2tqf7\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") " pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.109353 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2tqf7" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.121156 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.583573 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.114:9090/-/ready\": dial tcp 10.217.0.114:9090: connect: connection refused" Dec 09 17:16:59 crc kubenswrapper[4840]: I1209 17:16:59.612481 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" event={"ID":"cc6d69d7-b580-4da6-8233-9e522a3674cd","Type":"ContainerStarted","Data":"576df8ae96d2671208e606c453cfac71ade1c3af6a0c29287b0d6e5a04127511"} Dec 09 17:16:59 crc kubenswrapper[4840]: E1209 17:16:59.800671 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 09 17:16:59 crc kubenswrapper[4840]: E1209 17:16:59.800833 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k6sh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-jnkjx_openstack(87c31a2f-f8da-4391-91b7-16544aceaf18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:16:59 crc kubenswrapper[4840]: E1209 17:16:59.802136 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-jnkjx" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.264093 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-create-g4td9"] Dec 09 17:17:00 crc kubenswrapper[4840]: W1209 17:17:00.269719 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeed8dbd1_0d39_4a2d_aab1_f30ad04d98ce.slice/crio-8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a WatchSource:0}: Error finding container 8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a: Status 404 returned error can't find the container with id 8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.355849 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485556 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485621 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485709 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485891 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485920 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.485943 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.486080 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: 
\"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.486130 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tksfr\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr\") pod \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\" (UID: \"e90b0e97-f543-4d75-bb6d-8d96c7b3d663\") " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.493500 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config" (OuterVolumeSpecName: "config") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.494166 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.504246 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.508292 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr" (OuterVolumeSpecName: "kube-api-access-tksfr") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "kube-api-access-tksfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.522136 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out" (OuterVolumeSpecName: "config-out") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.523220 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "thanos-prometheus-http-client-file". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.545442 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sjx4x"] Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.551205 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config" (OuterVolumeSpecName: "web-config") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.552898 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "e90b0e97-f543-4d75-bb6d-8d96c7b3d663" (UID: "e90b0e97-f543-4d75-bb6d-8d96c7b3d663"). InnerVolumeSpecName "pvc-041ac8f0-de82-421c-9846-9af6d3836978". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.560577 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-24c4-account-create-update-htjw8"] Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588781 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588813 4840 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588823 4840 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588853 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") on node \"crc\" " Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588867 4840 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588876 4840 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-config-out\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588884 4840 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-web-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.588893 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tksfr\" (UniqueName: \"kubernetes.io/projected/e90b0e97-f543-4d75-bb6d-8d96c7b3d663-kube-api-access-tksfr\") on node \"crc\" DevicePath \"\"" Dec 
09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.618138 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.619914 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-041ac8f0-de82-421c-9846-9af6d3836978" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978") on node "crc"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.636309 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c4-account-create-update-htjw8" event={"ID":"4b3dfd2c-a156-4f5f-b950-94623d183859","Type":"ContainerStarted","Data":"402731aa5d0bb1e608f86ac0ce089573904acab6371be8b031e044280ac245e0"}
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.638061 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-g4td9" event={"ID":"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce","Type":"ContainerStarted","Data":"4e696a39b1d8fa2d4a0db7b9d6f77f08361f4a70cc0f994c22f0909d3144485a"}
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.638176 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-g4td9" event={"ID":"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce","Type":"ContainerStarted","Data":"8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a"}
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.642244 4840 generic.go:334] "Generic (PLEG): container finished" podID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerID="d3f8bd56dbb7fe79ac20380adcc8b6b91d6346d5e9312a59d1c57072dea569a3" exitCode=0
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.642460 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" event={"ID":"cc6d69d7-b580-4da6-8233-9e522a3674cd","Type":"ContainerDied","Data":"d3f8bd56dbb7fe79ac20380adcc8b6b91d6346d5e9312a59d1c57072dea569a3"}
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.670380 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-db-create-g4td9" podStartSLOduration=2.6703621 podStartE2EDuration="2.6703621s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:00.666589921 +0000 UTC m=+1206.657700554" watchObservedRunningTime="2025-12-09 17:17:00.6703621 +0000 UTC m=+1206.661472733"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.676861 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.676864 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e90b0e97-f543-4d75-bb6d-8d96c7b3d663","Type":"ContainerDied","Data":"85fe337a4ed08a14d08de3b6fac86b7d5fe320f46633b88e075256ca9d971826"}
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.677010 4840 scope.go:117] "RemoveContainer" containerID="ecda9951d590e5506700094ad4824525cf1564371b780555f9c7808f5d97d461"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.687127 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sjx4x" event={"ID":"9cc949a4-49ca-42c2-b427-e8586dad8ebc","Type":"ContainerStarted","Data":"8613b643d9b7b1c7efc530c0a61e26c086de93a13fb31623d4ace4c7690b38c4"}
Dec 09 17:17:00 crc kubenswrapper[4840]: E1209 17:17:00.687530 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-jnkjx" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.690037 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:00 crc kubenswrapper[4840]: W1209 17:17:00.726734 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69a097ba_a134_4d5f_906b_0cdb275ff034.slice/crio-774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1 WatchSource:0}: Error finding container 774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1: Status 404 returned error can't find the container with id 774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.741504 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2tqf7"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.782630 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1723-account-create-update-hmglv"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.813894 4840 scope.go:117] "RemoveContainer" containerID="99b80de4e52773d9dc5ef8a33fa6e1aa4c163d3f46ecb5d4fffab8195e3aca6b"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.889951 4840 scope.go:117] "RemoveContainer" containerID="d49b34ac0e0ce74cc8312e488df73e9b24d6383702336af4a885df065a102656"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.927427 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-905b-account-create-update-xmh8g"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.936737 4840 scope.go:117] "RemoveContainer" containerID="bec60d7aeedceafb53803205ef79d83eae1ec02527856d1bc99add536c2e08c8"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.948118 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.961179 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.975404 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6lkf2"]
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984030 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 09 17:17:00 crc kubenswrapper[4840]: E1209 17:17:00.984426 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="thanos-sidecar"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984441 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="thanos-sidecar"
Dec 09 17:17:00 crc kubenswrapper[4840]: E1209 17:17:00.984459 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="init-config-reloader"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984465 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="init-config-reloader"
Dec 09 17:17:00 crc kubenswrapper[4840]: E1209 17:17:00.984490 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="config-reloader"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984496 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="config-reloader"
Dec 09 17:17:00 crc kubenswrapper[4840]: E1209 17:17:00.984516 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984522 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984679 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="thanos-sidecar"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984701 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="config-reloader"
Dec 09 17:17:00 crc kubenswrapper[4840]: I1209 17:17:00.984716 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" containerName="prometheus"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.008709 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.012603 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.019019 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d4x6z"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.019234 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.019392 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.019531 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.024630 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.024870 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Dec 09 17:17:01 crc kubenswrapper[4840]: W1209 17:17:01.025692 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod183c8e61_03c5_46d5_a906_7943bf183913.slice/crio-b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40 WatchSource:0}: Error finding container b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40: Status 404 returned error can't find the container with id b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.034655 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.035494 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rwhks"]
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.068875 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ce1-account-create-update-n5v6g"]
Dec 09 17:17:01 crc kubenswrapper[4840]: W1209 17:17:01.071074 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc269e4e2_0a9c_45ae_8a09_066ef4203036.slice/crio-275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b WatchSource:0}: Error finding container 275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b: Status 404 returned error can't find the container with id 275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112520 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112552 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112599 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112614 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112682 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112705 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112723 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112764 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112789 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btq6z\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-kube-api-access-btq6z\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112806 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.112855 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216290 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216338 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216365 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216400 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216428 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btq6z\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-kube-api-access-btq6z\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216447 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216499 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216529 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216547 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216592 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.216610 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.231072 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.286713 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.289394 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.291932 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.301771 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.302911 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.309604 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.309727 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fdb12b4bbbb8ef13fe38e41860e07d7602896cea5ea735efd7d4e9b73bcc1e4d/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.309599 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.317651 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.327434 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-config\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.328011 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btq6z\" (UniqueName: \"kubernetes.io/projected/edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4-kube-api-access-btq6z\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.428412 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-041ac8f0-de82-421c-9846-9af6d3836978\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-041ac8f0-de82-421c-9846-9af6d3836978\") pod \"prometheus-metric-storage-0\" (UID: \"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.647401 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.699115 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwhks" event={"ID":"183c8e61-03c5-46d5-a906-7943bf183913","Type":"ContainerStarted","Data":"dc78e48897ed6a351f5b61bebcd6eb792801f6cb3a9544d86ee7cfb5a36cecb1"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.699169 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwhks" event={"ID":"183c8e61-03c5-46d5-a906-7943bf183913","Type":"ContainerStarted","Data":"b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.702597 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ce1-account-create-update-n5v6g" event={"ID":"c269e4e2-0a9c-45ae-8a09-066ef4203036","Type":"ContainerStarted","Data":"d4c71d14dc4efa3565f3b288922fa68090597e8ae09cc54bd61b42cc95aa7ac1"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.702633 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ce1-account-create-update-n5v6g" event={"ID":"c269e4e2-0a9c-45ae-8a09-066ef4203036","Type":"ContainerStarted","Data":"275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.705493 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" event={"ID":"cc6d69d7-b580-4da6-8233-9e522a3674cd","Type":"ContainerStarted","Data":"badf747f4c324eb05e94248b4d2d4500b35c1e5287a088f8422e582c0fe04b0b"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.706217 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.714318 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1723-account-create-update-hmglv" event={"ID":"e1914945-6f71-4d08-8c84-d02706ed7b17","Type":"ContainerStarted","Data":"76161ed2ac33ef80b5875f7301526de119c4ae1a8188eb01e4a1b05d12bf7ad5"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.714367 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1723-account-create-update-hmglv" event={"ID":"e1914945-6f71-4d08-8c84-d02706ed7b17","Type":"ContainerStarted","Data":"61c56a73b890bb3953422c7194b5c8b8bc77ffc507612d6b74d6a7cdb0e5d012"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.719504 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-rwhks" podStartSLOduration=3.7194860800000002 podStartE2EDuration="3.71948608s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.715428383 +0000 UTC m=+1207.706539016" watchObservedRunningTime="2025-12-09 17:17:01.71948608 +0000 UTC m=+1207.710596713"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.736597 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c4-account-create-update-htjw8" event={"ID":"4b3dfd2c-a156-4f5f-b950-94623d183859","Type":"ContainerStarted","Data":"2ffc8cfdcd5e20f89923871bf804e4cd8179dc6d6963337a4300d9ba7c5fdc4d"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.743588 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" podStartSLOduration=14.743570973 podStartE2EDuration="14.743570973s" podCreationTimestamp="2025-12-09 17:16:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.73513192 +0000 UTC m=+1207.726242553" watchObservedRunningTime="2025-12-09 17:17:01.743570973 +0000 UTC m=+1207.734681606"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.751909 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6lkf2" event={"ID":"feb054c3-cc6f-4af4-9dce-0683f20ec01a","Type":"ContainerStarted","Data":"baec74f700582d39ae9a8906958e9199a691c9d779d4250aed41a4b79ebefb7e"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.751954 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6lkf2" event={"ID":"feb054c3-cc6f-4af4-9dce-0683f20ec01a","Type":"ContainerStarted","Data":"5afda73f8d4e28fcc93ec9a9522cb632323a2d2596200c62873723f1cf31101d"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.757291 4840 generic.go:334] "Generic (PLEG): container finished" podID="eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" containerID="4e696a39b1d8fa2d4a0db7b9d6f77f08361f4a70cc0f994c22f0909d3144485a" exitCode=0
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.757379 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-g4td9" event={"ID":"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce","Type":"ContainerDied","Data":"4e696a39b1d8fa2d4a0db7b9d6f77f08361f4a70cc0f994c22f0909d3144485a"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.759748 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" event={"ID":"63846955-b953-4eeb-9c6b-72a87b9740e8","Type":"ContainerStarted","Data":"fc75f4e17eed2b3d22906f8eaec1684e6a9eb84bbee122247d05128482621aca"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.759795 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" event={"ID":"63846955-b953-4eeb-9c6b-72a87b9740e8","Type":"ContainerStarted","Data":"2e29b8c598a13bd6e418f03fcf01d9305fd2bd66e5342f19940b04fe24d02875"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.763487 4840 generic.go:334] "Generic (PLEG): container finished" podID="69a097ba-a134-4d5f-906b-0cdb275ff034" containerID="54e8239b5ec2ff95ceb85b476086e090ff7de7c80dbe5bcc1278a5d07674b89b" exitCode=0
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.763524 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2tqf7" event={"ID":"69a097ba-a134-4d5f-906b-0cdb275ff034","Type":"ContainerDied","Data":"54e8239b5ec2ff95ceb85b476086e090ff7de7c80dbe5bcc1278a5d07674b89b"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.763540 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2tqf7" event={"ID":"69a097ba-a134-4d5f-906b-0cdb275ff034","Type":"ContainerStarted","Data":"774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1"}
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.765033 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5ce1-account-create-update-n5v6g" podStartSLOduration=3.764468015 podStartE2EDuration="3.764468015s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.756858786 +0000 UTC m=+1207.747969419" watchObservedRunningTime="2025-12-09 17:17:01.764468015 +0000 UTC m=+1207.755578648"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.804032 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-1723-account-create-update-hmglv" podStartSLOduration=3.804012204 podStartE2EDuration="3.804012204s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.789369552 +0000 UTC m=+1207.780480185" watchObservedRunningTime="2025-12-09 17:17:01.804012204 +0000 UTC m=+1207.795122837"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.809927 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-24c4-account-create-update-htjw8" podStartSLOduration=3.8099184839999998 podStartE2EDuration="3.809918484s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.80876211 +0000 UTC m=+1207.799872733" watchObservedRunningTime="2025-12-09 17:17:01.809918484 +0000 UTC m=+1207.801029117"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.871555 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" podStartSLOduration=3.871540338 podStartE2EDuration="3.871540338s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.868604184 +0000 UTC m=+1207.859714827" watchObservedRunningTime="2025-12-09 17:17:01.871540338 +0000 UTC m=+1207.862650971"
Dec 09 17:17:01 crc kubenswrapper[4840]: I1209 17:17:01.895934 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-6lkf2" podStartSLOduration=3.89591684 podStartE2EDuration="3.89591684s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:01.894332364 +0000 UTC m=+1207.885442997" watchObservedRunningTime="2025-12-09 17:17:01.89591684 +0000 UTC m=+1207.887027473"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.201741 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 09 17:17:02 crc kubenswrapper[4840]: W1209 17:17:02.206059 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedd5bd8a_5838_4ddc_b0a6_17d86a90e8e4.slice/crio-283ae6f26b0ad6abbb975c7215a30f1d558c9d128b06083e172bb2db205604c9 WatchSource:0}: Error finding container 283ae6f26b0ad6abbb975c7215a30f1d558c9d128b06083e172bb2db205604c9: Status 404 returned error can't find the container with id 283ae6f26b0ad6abbb975c7215a30f1d558c9d128b06083e172bb2db205604c9
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.306573 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-v78xq" podUID="637ab881-6952-409f-8e9d-619aaf72fb51" containerName="ovn-controller" probeResult="failure" output=<
Dec 09 17:17:02 crc kubenswrapper[4840]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Dec 09 17:17:02 crc kubenswrapper[4840]: >
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.346890 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6vxgb"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.348424 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6vxgb"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.666560 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e90b0e97-f543-4d75-bb6d-8d96c7b3d663" path="/var/lib/kubelet/pods/e90b0e97-f543-4d75-bb6d-8d96c7b3d663/volumes"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.667519 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-v78xq-config-vp2k7"]
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.671696 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-v78xq-config-vp2k7"]
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.671838 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.675290 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.757771 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.758103 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.758155 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.758197 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.758228 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.758296 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx84j\" (UniqueName: \"kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.772659 4840 generic.go:334] "Generic (PLEG): container finished" podID="183c8e61-03c5-46d5-a906-7943bf183913" containerID="dc78e48897ed6a351f5b61bebcd6eb792801f6cb3a9544d86ee7cfb5a36cecb1" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.772755 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwhks" event={"ID":"183c8e61-03c5-46d5-a906-7943bf183913","Type":"ContainerDied","Data":"dc78e48897ed6a351f5b61bebcd6eb792801f6cb3a9544d86ee7cfb5a36cecb1"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.773948 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerStarted","Data":"283ae6f26b0ad6abbb975c7215a30f1d558c9d128b06083e172bb2db205604c9"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.775451 4840 generic.go:334] "Generic (PLEG): container finished" podID="c269e4e2-0a9c-45ae-8a09-066ef4203036" containerID="d4c71d14dc4efa3565f3b288922fa68090597e8ae09cc54bd61b42cc95aa7ac1" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.775522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ce1-account-create-update-n5v6g" event={"ID":"c269e4e2-0a9c-45ae-8a09-066ef4203036","Type":"ContainerDied","Data":"d4c71d14dc4efa3565f3b288922fa68090597e8ae09cc54bd61b42cc95aa7ac1"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.777455 4840 generic.go:334] "Generic (PLEG): container finished" podID="63846955-b953-4eeb-9c6b-72a87b9740e8" containerID="fc75f4e17eed2b3d22906f8eaec1684e6a9eb84bbee122247d05128482621aca" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.777581 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" event={"ID":"63846955-b953-4eeb-9c6b-72a87b9740e8","Type":"ContainerDied","Data":"fc75f4e17eed2b3d22906f8eaec1684e6a9eb84bbee122247d05128482621aca"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.779216 4840 generic.go:334] "Generic (PLEG): container finished" podID="e1914945-6f71-4d08-8c84-d02706ed7b17" containerID="76161ed2ac33ef80b5875f7301526de119c4ae1a8188eb01e4a1b05d12bf7ad5" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.779281 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1723-account-create-update-hmglv" event={"ID":"e1914945-6f71-4d08-8c84-d02706ed7b17","Type":"ContainerDied","Data":"76161ed2ac33ef80b5875f7301526de119c4ae1a8188eb01e4a1b05d12bf7ad5"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.780773 4840 generic.go:334] "Generic (PLEG): container finished" podID="4b3dfd2c-a156-4f5f-b950-94623d183859" containerID="2ffc8cfdcd5e20f89923871bf804e4cd8179dc6d6963337a4300d9ba7c5fdc4d" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.780826 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c4-account-create-update-htjw8" event={"ID":"4b3dfd2c-a156-4f5f-b950-94623d183859","Type":"ContainerDied","Data":"2ffc8cfdcd5e20f89923871bf804e4cd8179dc6d6963337a4300d9ba7c5fdc4d"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.783383 4840 generic.go:334] "Generic (PLEG): container finished" podID="feb054c3-cc6f-4af4-9dce-0683f20ec01a" containerID="baec74f700582d39ae9a8906958e9199a691c9d779d4250aed41a4b79ebefb7e" exitCode=0
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.783511 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6lkf2" event={"ID":"feb054c3-cc6f-4af4-9dce-0683f20ec01a","Type":"ContainerDied","Data":"baec74f700582d39ae9a8906958e9199a691c9d779d4250aed41a4b79ebefb7e"}
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.861802 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.861852 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.861910 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx84j\" (UniqueName: \"kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.862030 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.862047 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.862104 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.863443 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.863520 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.864457 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.864518 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.867115 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:02 crc kubenswrapper[4840]: I1209 17:17:02.887066 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx84j\" (UniqueName: \"kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j\") pod \"ovn-controller-v78xq-config-vp2k7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.004610 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq-config-vp2k7"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.200101 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-g4td9"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.238803 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2tqf7"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.275977 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts\") pod \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") "
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.276067 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq2f8\" (UniqueName: \"kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8\") pod \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\" (UID: \"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce\") "
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.277278 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" (UID: "eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.290565 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8" (OuterVolumeSpecName: "kube-api-access-fq2f8") pod "eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" (UID: "eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce"). InnerVolumeSpecName "kube-api-access-fq2f8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.378419 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts\") pod \"69a097ba-a134-4d5f-906b-0cdb275ff034\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") "
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.378522 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpdfq\" (UniqueName: \"kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq\") pod \"69a097ba-a134-4d5f-906b-0cdb275ff034\" (UID: \"69a097ba-a134-4d5f-906b-0cdb275ff034\") "
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.378985 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.379013 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq2f8\" (UniqueName: \"kubernetes.io/projected/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce-kube-api-access-fq2f8\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.379356 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "69a097ba-a134-4d5f-906b-0cdb275ff034" (UID: "69a097ba-a134-4d5f-906b-0cdb275ff034"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.383612 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq" (OuterVolumeSpecName: "kube-api-access-jpdfq") pod "69a097ba-a134-4d5f-906b-0cdb275ff034" (UID: "69a097ba-a134-4d5f-906b-0cdb275ff034"). InnerVolumeSpecName "kube-api-access-jpdfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.480788 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpdfq\" (UniqueName: \"kubernetes.io/projected/69a097ba-a134-4d5f-906b-0cdb275ff034-kube-api-access-jpdfq\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.480824 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a097ba-a134-4d5f-906b-0cdb275ff034-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.556954 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-v78xq-config-vp2k7"]
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.794509 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-g4td9"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.795608 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-g4td9" event={"ID":"eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce","Type":"ContainerDied","Data":"8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a"}
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.795646 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ac579fdd6432fc970912ef44cdf680536e4a40f9d139ea8f24b73ba5f64410a"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.800303 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2tqf7"
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.805140 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2tqf7" event={"ID":"69a097ba-a134-4d5f-906b-0cdb275ff034","Type":"ContainerDied","Data":"774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1"}
Dec 09 17:17:03 crc kubenswrapper[4840]: I1209 17:17:03.805181 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="774af6998dd861dad0c6242eb3fb324059c5c089404551da3ea846f580a733c1"
Dec 09 17:17:05 crc kubenswrapper[4840]: I1209 17:17:05.519437 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-ingester-0"
Dec 09 17:17:05 crc kubenswrapper[4840]: I1209 17:17:05.818456 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerStarted","Data":"181bb0d18ca2eba399cf6a2b8c6dc7c110edc4dbb23113680276ac559413aa95"}
Dec 09 17:17:06 crc kubenswrapper[4840]: W1209 17:17:06.438179 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9e6ad3c_ffa6_4584_accb_1003fdcf18d7.slice/crio-67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c WatchSource:0}: Error finding container 67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c: Status 404 returned error can't find the container with id 67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.665023 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rwhks"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.700890 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c4-account-create-update-htjw8"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.724016 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6lkf2"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.731704 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-905b-account-create-update-xmh8g"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.755490 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1723-account-create-update-hmglv"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.774287 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69l2z\" (UniqueName: \"kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z\") pod \"183c8e61-03c5-46d5-a906-7943bf183913\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.774461 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdrs5\" (UniqueName: \"kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5\") pod \"4b3dfd2c-a156-4f5f-b950-94623d183859\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.774550 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts\") pod \"183c8e61-03c5-46d5-a906-7943bf183913\" (UID: \"183c8e61-03c5-46d5-a906-7943bf183913\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.774613 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts\") pod \"4b3dfd2c-a156-4f5f-b950-94623d183859\" (UID: \"4b3dfd2c-a156-4f5f-b950-94623d183859\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.775860 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b3dfd2c-a156-4f5f-b950-94623d183859" (UID: "4b3dfd2c-a156-4f5f-b950-94623d183859"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.775957 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "183c8e61-03c5-46d5-a906-7943bf183913" (UID: "183c8e61-03c5-46d5-a906-7943bf183913"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.779107 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5" (OuterVolumeSpecName: "kube-api-access-mdrs5") pod "4b3dfd2c-a156-4f5f-b950-94623d183859" (UID: "4b3dfd2c-a156-4f5f-b950-94623d183859"). InnerVolumeSpecName "kube-api-access-mdrs5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.779194 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z" (OuterVolumeSpecName: "kube-api-access-69l2z") pod "183c8e61-03c5-46d5-a906-7943bf183913" (UID: "183c8e61-03c5-46d5-a906-7943bf183913"). InnerVolumeSpecName "kube-api-access-69l2z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.781768 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ce1-account-create-update-n5v6g"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.829144 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1723-account-create-update-hmglv"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.829143 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1723-account-create-update-hmglv" event={"ID":"e1914945-6f71-4d08-8c84-d02706ed7b17","Type":"ContainerDied","Data":"61c56a73b890bb3953422c7194b5c8b8bc77ffc507612d6b74d6a7cdb0e5d012"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.829223 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61c56a73b890bb3953422c7194b5c8b8bc77ffc507612d6b74d6a7cdb0e5d012"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.830802 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sjx4x" event={"ID":"9cc949a4-49ca-42c2-b427-e8586dad8ebc","Type":"ContainerStarted","Data":"badd043f150336fe753cf4703c39aff99f4b65cb758bead86f410a1754883bff"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.834098 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c4-account-create-update-htjw8" event={"ID":"4b3dfd2c-a156-4f5f-b950-94623d183859","Type":"ContainerDied","Data":"402731aa5d0bb1e608f86ac0ce089573904acab6371be8b031e044280ac245e0"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.834117 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c4-account-create-update-htjw8"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.834122 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="402731aa5d0bb1e608f86ac0ce089573904acab6371be8b031e044280ac245e0"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.839199 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6lkf2" event={"ID":"feb054c3-cc6f-4af4-9dce-0683f20ec01a","Type":"ContainerDied","Data":"5afda73f8d4e28fcc93ec9a9522cb632323a2d2596200c62873723f1cf31101d"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.839235 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5afda73f8d4e28fcc93ec9a9522cb632323a2d2596200c62873723f1cf31101d"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.839299 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6lkf2"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.850707 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rwhks"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.850713 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwhks" event={"ID":"183c8e61-03c5-46d5-a906-7943bf183913","Type":"ContainerDied","Data":"b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.850857 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5c6dc672f55c07c11ddc0d3984e5c834645ff8d77ebf0db219c98d57acbee40"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.853755 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ce1-account-create-update-n5v6g"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.853745 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ce1-account-create-update-n5v6g" event={"ID":"c269e4e2-0a9c-45ae-8a09-066ef4203036","Type":"ContainerDied","Data":"275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.853855 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="275b328a2b442dbb3ee62f7ac505e796f148129f4149f6d4fe0e0274128f948b"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.855644 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-sjx4x" podStartSLOduration=2.863158791 podStartE2EDuration="8.855629865s" podCreationTimestamp="2025-12-09 17:16:58 +0000 UTC" firstStartedPulling="2025-12-09 17:17:00.526456876 +0000 UTC m=+1206.517567509" lastFinishedPulling="2025-12-09 17:17:06.51892794 +0000 UTC m=+1212.510038583" observedRunningTime="2025-12-09 17:17:06.844777533 +0000 UTC m=+1212.835888166" watchObservedRunningTime="2025-12-09 17:17:06.855629865 +0000 UTC m=+1212.846740508"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.856704 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-905b-account-create-update-xmh8g"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.856791 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-905b-account-create-update-xmh8g" event={"ID":"63846955-b953-4eeb-9c6b-72a87b9740e8","Type":"ContainerDied","Data":"2e29b8c598a13bd6e418f03fcf01d9305fd2bd66e5342f19940b04fe24d02875"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.856824 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e29b8c598a13bd6e418f03fcf01d9305fd2bd66e5342f19940b04fe24d02875"
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.860660 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-v78xq-config-vp2k7" event={"ID":"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7","Type":"ContainerStarted","Data":"67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c"}
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876454 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts\") pod \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876503 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgr9r\" (UniqueName: \"kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r\") pod \"c269e4e2-0a9c-45ae-8a09-066ef4203036\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876526 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts\") pod \"e1914945-6f71-4d08-8c84-d02706ed7b17\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876573 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pxlp\" (UniqueName: \"kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp\") pod \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\" (UID: \"feb054c3-cc6f-4af4-9dce-0683f20ec01a\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876610 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82299\" (UniqueName: \"kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299\") pod \"63846955-b953-4eeb-9c6b-72a87b9740e8\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876710 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-678gk\" (UniqueName: \"kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk\") pod \"e1914945-6f71-4d08-8c84-d02706ed7b17\" (UID: \"e1914945-6f71-4d08-8c84-d02706ed7b17\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876745 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts\") pod \"63846955-b953-4eeb-9c6b-72a87b9740e8\" (UID: \"63846955-b953-4eeb-9c6b-72a87b9740e8\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.876769 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts\") pod \"c269e4e2-0a9c-45ae-8a09-066ef4203036\" (UID: \"c269e4e2-0a9c-45ae-8a09-066ef4203036\") "
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877089 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "feb054c3-cc6f-4af4-9dce-0683f20ec01a" (UID: "feb054c3-cc6f-4af4-9dce-0683f20ec01a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877139 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1914945-6f71-4d08-8c84-d02706ed7b17" (UID: "e1914945-6f71-4d08-8c84-d02706ed7b17"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877471 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c269e4e2-0a9c-45ae-8a09-066ef4203036" (UID: "c269e4e2-0a9c-45ae-8a09-066ef4203036"). InnerVolumeSpecName "operator-scripts".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877530 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/183c8e61-03c5-46d5-a906-7943bf183913-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877548 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b3dfd2c-a156-4f5f-b950-94623d183859-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877560 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69l2z\" (UniqueName: \"kubernetes.io/projected/183c8e61-03c5-46d5-a906-7943bf183913-kube-api-access-69l2z\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877556 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "63846955-b953-4eeb-9c6b-72a87b9740e8" (UID: "63846955-b953-4eeb-9c6b-72a87b9740e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877570 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/feb054c3-cc6f-4af4-9dce-0683f20ec01a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877579 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1914945-6f71-4d08-8c84-d02706ed7b17-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.877588 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdrs5\" (UniqueName: \"kubernetes.io/projected/4b3dfd2c-a156-4f5f-b950-94623d183859-kube-api-access-mdrs5\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.881134 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299" (OuterVolumeSpecName: "kube-api-access-82299") pod "63846955-b953-4eeb-9c6b-72a87b9740e8" (UID: "63846955-b953-4eeb-9c6b-72a87b9740e8"). InnerVolumeSpecName "kube-api-access-82299". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.881175 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk" (OuterVolumeSpecName: "kube-api-access-678gk") pod "e1914945-6f71-4d08-8c84-d02706ed7b17" (UID: "e1914945-6f71-4d08-8c84-d02706ed7b17"). InnerVolumeSpecName "kube-api-access-678gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.881571 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp" (OuterVolumeSpecName: "kube-api-access-8pxlp") pod "feb054c3-cc6f-4af4-9dce-0683f20ec01a" (UID: "feb054c3-cc6f-4af4-9dce-0683f20ec01a"). InnerVolumeSpecName "kube-api-access-8pxlp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.881757 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r" (OuterVolumeSpecName: "kube-api-access-sgr9r") pod "c269e4e2-0a9c-45ae-8a09-066ef4203036" (UID: "c269e4e2-0a9c-45ae-8a09-066ef4203036"). InnerVolumeSpecName "kube-api-access-sgr9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.979985 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pxlp\" (UniqueName: \"kubernetes.io/projected/feb054c3-cc6f-4af4-9dce-0683f20ec01a-kube-api-access-8pxlp\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.980021 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82299\" (UniqueName: \"kubernetes.io/projected/63846955-b953-4eeb-9c6b-72a87b9740e8-kube-api-access-82299\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.980032 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-678gk\" (UniqueName: \"kubernetes.io/projected/e1914945-6f71-4d08-8c84-d02706ed7b17-kube-api-access-678gk\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.980045 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63846955-b953-4eeb-9c6b-72a87b9740e8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.980056 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c269e4e2-0a9c-45ae-8a09-066ef4203036-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:06 crc kubenswrapper[4840]: I1209 17:17:06.980065 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgr9r\" (UniqueName: \"kubernetes.io/projected/c269e4e2-0a9c-45ae-8a09-066ef4203036-kube-api-access-sgr9r\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:07 crc kubenswrapper[4840]: I1209 17:17:07.308850 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-v78xq" Dec 09 17:17:07 crc kubenswrapper[4840]: I1209 17:17:07.870142 4840 generic.go:334] "Generic (PLEG): container finished" podID="b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" containerID="00adfa2e3245da69d9392e24a34b9a3e60188a038c6325fa7e0dcd165b2ecf83" exitCode=0 Dec 09 17:17:07 crc kubenswrapper[4840]: I1209 17:17:07.870355 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-v78xq-config-vp2k7" event={"ID":"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7","Type":"ContainerDied","Data":"00adfa2e3245da69d9392e24a34b9a3e60188a038c6325fa7e0dcd165b2ecf83"} Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.104257 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.204433 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.204824 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="dnsmasq-dns" 
containerID="cri-o://c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8" gracePeriod=10 Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.683382 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.819740 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb\") pod \"97461318-407b-4b1d-b565-b0fcdd35a60d\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.819828 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb\") pod \"97461318-407b-4b1d-b565-b0fcdd35a60d\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.819872 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc\") pod \"97461318-407b-4b1d-b565-b0fcdd35a60d\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.820026 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config\") pod \"97461318-407b-4b1d-b565-b0fcdd35a60d\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.820077 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqk98\" (UniqueName: \"kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98\") pod \"97461318-407b-4b1d-b565-b0fcdd35a60d\" (UID: \"97461318-407b-4b1d-b565-b0fcdd35a60d\") " Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.835516 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98" (OuterVolumeSpecName: "kube-api-access-lqk98") pod "97461318-407b-4b1d-b565-b0fcdd35a60d" (UID: "97461318-407b-4b1d-b565-b0fcdd35a60d"). InnerVolumeSpecName "kube-api-access-lqk98". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.877906 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "97461318-407b-4b1d-b565-b0fcdd35a60d" (UID: "97461318-407b-4b1d-b565-b0fcdd35a60d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.883780 4840 generic.go:334] "Generic (PLEG): container finished" podID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerID="c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8" exitCode=0 Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.883919 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" event={"ID":"97461318-407b-4b1d-b565-b0fcdd35a60d","Type":"ContainerDied","Data":"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8"} Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.883988 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" event={"ID":"97461318-407b-4b1d-b565-b0fcdd35a60d","Type":"ContainerDied","Data":"f8d110bf9a0d63cd3686db03ead4ce11718a9c2e818be7f75ea0a671714171c3"} Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.884004 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-lqngs" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.884011 4840 scope.go:117] "RemoveContainer" containerID="c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.889443 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "97461318-407b-4b1d-b565-b0fcdd35a60d" (UID: "97461318-407b-4b1d-b565-b0fcdd35a60d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.895283 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config" (OuterVolumeSpecName: "config") pod "97461318-407b-4b1d-b565-b0fcdd35a60d" (UID: "97461318-407b-4b1d-b565-b0fcdd35a60d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.895542 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "97461318-407b-4b1d-b565-b0fcdd35a60d" (UID: "97461318-407b-4b1d-b565-b0fcdd35a60d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.922306 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.922350 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.922363 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.922375 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97461318-407b-4b1d-b565-b0fcdd35a60d-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.922391 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqk98\" (UniqueName: \"kubernetes.io/projected/97461318-407b-4b1d-b565-b0fcdd35a60d-kube-api-access-lqk98\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:08 crc kubenswrapper[4840]: I1209 17:17:08.972040 4840 scope.go:117] "RemoveContainer" containerID="60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.004347 4840 scope.go:117] "RemoveContainer" containerID="c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8" Dec 09 17:17:09 crc kubenswrapper[4840]: E1209 17:17:09.006287 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8\": container with ID starting with c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8 not found: ID does not exist" containerID="c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.006365 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8"} err="failed to get container status \"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8\": rpc error: code = NotFound desc = could not find container \"c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8\": container with ID starting with c2d2d4af2311fd144f95d912f1cf583321c5100b4a8c81eb46d6d07bee7f33f8 not found: ID does not exist" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.006426 4840 scope.go:117] "RemoveContainer" containerID="60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e" Dec 09 17:17:09 crc kubenswrapper[4840]: E1209 17:17:09.007007 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e\": container with ID starting with 60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e not found: ID does not exist" containerID="60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.007066 4840 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e"} err="failed to get container status \"60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e\": rpc error: code = NotFound desc = could not find container \"60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e\": container with ID starting with 60344c6015aaa186c19c1179312dadac2a1c720efe01f5c1709f8b0e86711d1e not found: ID does not exist" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.172897 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq-config-vp2k7" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227327 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227435 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227469 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227553 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227580 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227583 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227634 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227672 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx84j\" (UniqueName: \"kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j\") pod \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\" (UID: \"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7\") " Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.227747 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run" (OuterVolumeSpecName: "var-run") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.228045 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.228122 4840 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.228135 4840 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.228144 4840 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.228382 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.229467 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts" (OuterVolumeSpecName: "scripts") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.232509 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j" (OuterVolumeSpecName: "kube-api-access-vx84j") pod "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" (UID: "b9e6ad3c-ffa6-4584-accb-1003fdcf18d7"). InnerVolumeSpecName "kube-api-access-vx84j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.237598 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-lqngs"] Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.330145 4840 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.330457 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.330468 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx84j\" (UniqueName: \"kubernetes.io/projected/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7-kube-api-access-vx84j\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.903697 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-v78xq-config-vp2k7" event={"ID":"b9e6ad3c-ffa6-4584-accb-1003fdcf18d7","Type":"ContainerDied","Data":"67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c"} Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.903759 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67ba6b3da9638f546d4eb93f05d79d22d0d7e417db5612ef4d6d192cd901ef1c" Dec 09 17:17:09 crc kubenswrapper[4840]: I1209 17:17:09.903860 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-v78xq-config-vp2k7" Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.307187 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-v78xq-config-vp2k7"] Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.323055 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-v78xq-config-vp2k7"] Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.624095 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" path="/var/lib/kubelet/pods/97461318-407b-4b1d-b565-b0fcdd35a60d/volumes" Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.624939 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" path="/var/lib/kubelet/pods/b9e6ad3c-ffa6-4584-accb-1003fdcf18d7/volumes" Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.919005 4840 generic.go:334] "Generic (PLEG): container finished" podID="9cc949a4-49ca-42c2-b427-e8586dad8ebc" containerID="badd043f150336fe753cf4703c39aff99f4b65cb758bead86f410a1754883bff" exitCode=0 Dec 09 17:17:10 crc kubenswrapper[4840]: I1209 17:17:10.919055 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sjx4x" event={"ID":"9cc949a4-49ca-42c2-b427-e8586dad8ebc","Type":"ContainerDied","Data":"badd043f150336fe753cf4703c39aff99f4b65cb758bead86f410a1754883bff"} Dec 09 17:17:11 crc kubenswrapper[4840]: I1209 17:17:11.936100 4840 generic.go:334] "Generic (PLEG): container finished" podID="edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4" containerID="181bb0d18ca2eba399cf6a2b8c6dc7c110edc4dbb23113680276ac559413aa95" exitCode=0 Dec 09 17:17:11 crc kubenswrapper[4840]: I1209 17:17:11.936205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerDied","Data":"181bb0d18ca2eba399cf6a2b8c6dc7c110edc4dbb23113680276ac559413aa95"} Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.306310 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.388874 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tgp9\" (UniqueName: \"kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9\") pod \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.389018 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data\") pod \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.389171 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle\") pod \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\" (UID: \"9cc949a4-49ca-42c2-b427-e8586dad8ebc\") " Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.392509 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9" (OuterVolumeSpecName: "kube-api-access-9tgp9") pod "9cc949a4-49ca-42c2-b427-e8586dad8ebc" (UID: "9cc949a4-49ca-42c2-b427-e8586dad8ebc"). InnerVolumeSpecName "kube-api-access-9tgp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.410953 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9cc949a4-49ca-42c2-b427-e8586dad8ebc" (UID: "9cc949a4-49ca-42c2-b427-e8586dad8ebc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.432854 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data" (OuterVolumeSpecName: "config-data") pod "9cc949a4-49ca-42c2-b427-e8586dad8ebc" (UID: "9cc949a4-49ca-42c2-b427-e8586dad8ebc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.491060 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.491112 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tgp9\" (UniqueName: \"kubernetes.io/projected/9cc949a4-49ca-42c2-b427-e8586dad8ebc-kube-api-access-9tgp9\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.491123 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9cc949a4-49ca-42c2-b427-e8586dad8ebc-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.948335 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sjx4x" event={"ID":"9cc949a4-49ca-42c2-b427-e8586dad8ebc","Type":"ContainerDied","Data":"8613b643d9b7b1c7efc530c0a61e26c086de93a13fb31623d4ace4c7690b38c4"} Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.948375 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8613b643d9b7b1c7efc530c0a61e26c086de93a13fb31623d4ace4c7690b38c4" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.948446 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sjx4x" Dec 09 17:17:12 crc kubenswrapper[4840]: I1209 17:17:12.951630 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerStarted","Data":"ffecfd8251e8c29844d6412dd4abd80917c73481e7f2757f2f34bd20ebb115b3"} Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.311688 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lvtn5"] Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312452 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cc949a4-49ca-42c2-b427-e8586dad8ebc" containerName="keystone-db-sync" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312471 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cc949a4-49ca-42c2-b427-e8586dad8ebc" containerName="keystone-db-sync" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312484 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63846955-b953-4eeb-9c6b-72a87b9740e8" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312492 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="63846955-b953-4eeb-9c6b-72a87b9740e8" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312508 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="183c8e61-03c5-46d5-a906-7943bf183913" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312516 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="183c8e61-03c5-46d5-a906-7943bf183913" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312529 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a097ba-a134-4d5f-906b-0cdb275ff034" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 
17:17:13.312536 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a097ba-a134-4d5f-906b-0cdb275ff034" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312547 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b3dfd2c-a156-4f5f-b950-94623d183859" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312554 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b3dfd2c-a156-4f5f-b950-94623d183859" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312568 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="init" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312576 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="init" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312589 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c269e4e2-0a9c-45ae-8a09-066ef4203036" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312597 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c269e4e2-0a9c-45ae-8a09-066ef4203036" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312605 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" containerName="ovn-config" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312612 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" containerName="ovn-config" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312625 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312633 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312656 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="feb054c3-cc6f-4af4-9dce-0683f20ec01a" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312664 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="feb054c3-cc6f-4af4-9dce-0683f20ec01a" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312681 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="dnsmasq-dns" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312688 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="dnsmasq-dns" Dec 09 17:17:13 crc kubenswrapper[4840]: E1209 17:17:13.312698 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1914945-6f71-4d08-8c84-d02706ed7b17" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312705 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1914945-6f71-4d08-8c84-d02706ed7b17" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312943 4840 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c269e4e2-0a9c-45ae-8a09-066ef4203036" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312986 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="97461318-407b-4b1d-b565-b0fcdd35a60d" containerName="dnsmasq-dns" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.312998 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="feb054c3-cc6f-4af4-9dce-0683f20ec01a" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313010 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9e6ad3c-ffa6-4584-accb-1003fdcf18d7" containerName="ovn-config" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313028 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313037 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a097ba-a134-4d5f-906b-0cdb275ff034" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313054 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="183c8e61-03c5-46d5-a906-7943bf183913" containerName="mariadb-database-create" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313068 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b3dfd2c-a156-4f5f-b950-94623d183859" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313085 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cc949a4-49ca-42c2-b427-e8586dad8ebc" containerName="keystone-db-sync" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313096 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1914945-6f71-4d08-8c84-d02706ed7b17" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313111 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="63846955-b953-4eeb-9c6b-72a87b9740e8" containerName="mariadb-account-create-update" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.313948 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.317668 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ddftm" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.317955 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.317955 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.318092 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.318121 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.327747 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lvtn5"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.352323 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.355364 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.398691 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.412984 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6htp\" (UniqueName: \"kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413057 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413091 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413169 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413223 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 
17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413262 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413300 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413326 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n6l2\" (UniqueName: \"kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413357 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413387 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413410 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.413432 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.504901 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-qv6wf"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.507555 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.512257 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-zm65n" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.514706 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.514731 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515728 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515775 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515804 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515824 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n6l2\" (UniqueName: \"kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515845 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515865 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515884 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515899 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: 
\"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515929 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6htp\" (UniqueName: \"kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.515952 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.516086 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.516147 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.517008 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.518614 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.521016 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.521172 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.521578 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc 
kubenswrapper[4840]: I1209 17:17:13.521575 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.521666 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.521871 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.524716 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.542903 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.548294 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qv6wf"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.557885 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7n6l2\" (UniqueName: \"kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2\") pod \"dnsmasq-dns-6f8c45789f-dhw7j\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") " pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.560225 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6htp\" (UniqueName: \"kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp\") pod \"keystone-bootstrap-lvtn5\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") " pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.565761 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.575267 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.581831 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.581991 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.617861 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.617907 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.617950 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.617983 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618028 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618047 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618171 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618216 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618307 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618434 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618469 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5sfs\" (UniqueName: \"kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618508 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.618579 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9srgf\" (UniqueName: \"kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.647799 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lvtn5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.694352 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.696013 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720112 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720166 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720204 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720230 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720281 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720342 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720366 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5sfs\" (UniqueName: \"kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720395 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720434 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9srgf\" (UniqueName: \"kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720533 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720570 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720627 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720656 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.720735 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.723025 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.724490 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.726154 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.732308 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.734703 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.789580 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle\") pod 
\"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.790257 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.800542 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.807504 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.808778 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-frlvd"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.809976 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.811156 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.811222 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5sfs\" (UniqueName: \"kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.813158 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data\") pod \"cinder-db-sync-qv6wf\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.814309 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.814524 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-klcfc" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.816402 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9srgf\" (UniqueName: \"kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf\") pod \"ceilometer-0\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.819016 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.820491 4840 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.822429 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.859873 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-frlvd"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.906609 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.916469 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-4ts68"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.920418 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.923378 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nh9m5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.923530 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.923658 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.924720 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.924821 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.924892 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwxv7\" (UniqueName: \"kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.924956 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925069 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925155 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925257 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925326 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925409 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925499 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925578 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925659 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grffj\" (UniqueName: \"kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925732 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925803 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4ts68"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.925955 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzpk9\" (UniqueName: \"kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9\") pod 
\"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.926550 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.933182 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-sync-f6mr5"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.934488 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.941518 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-zthv5"] Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.945910 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.952916 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.953350 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.954301 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.954344 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-4pj7n" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.954567 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qtt8g" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.956215 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.957303 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 09 17:17:13 crc kubenswrapper[4840]: I1209 17:17:13.997666 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-f6mr5"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.023997 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-zthv5"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.026949 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.026995 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027018 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwxv7\" (UniqueName: \"kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7\") pod \"barbican-db-sync-frlvd\" (UID: 
\"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027054 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027074 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027097 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027119 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027161 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027179 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027193 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027225 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027247 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: 
I1209 17:17:14.027262 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr76w\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027291 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027332 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grffj\" (UniqueName: \"kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027351 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027367 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027389 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027407 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6d64\" (UniqueName: \"kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027438 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzpk9\" (UniqueName: \"kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.027456 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 
17:17:14.027471 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.028742 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.029602 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.030457 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.030942 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.031129 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.032028 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.035584 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.037795 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.042022 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle\") pod 
\"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.051275 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.052732 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.055334 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwxv7\" (UniqueName: \"kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7\") pod \"barbican-db-sync-frlvd\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.056649 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzpk9\" (UniqueName: \"kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9\") pod \"placement-db-sync-4ts68\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.061069 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grffj\" (UniqueName: \"kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj\") pod \"dnsmasq-dns-fcfdd6f9f-ps8ql\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.129914 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.129975 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr76w\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.130030 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.130064 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc 
kubenswrapper[4840]: I1209 17:17:14.130086 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6d64\" (UniqueName: \"kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.130134 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.130151 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.130206 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.201873 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.219589 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.279072 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.280236 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.280543 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.280648 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.282586 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr76w\" (UniqueName: 
\"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.282667 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6d64\" (UniqueName: \"kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64\") pod \"neutron-db-sync-zthv5\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") " pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.282954 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.284447 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs\") pod \"cloudkitty-db-sync-f6mr5\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.295340 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lvtn5"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.439646 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.529033 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4ts68" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.557839 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.560776 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qv6wf"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.572500 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.573633 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zthv5" Dec 09 17:17:14 crc kubenswrapper[4840]: W1209 17:17:14.575280 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ff5b771_f400_4f66_9d95_9f66fff18a82.slice/crio-7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56 WatchSource:0}: Error finding container 7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56: Status 404 returned error can't find the container with id 7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56 Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.828361 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-frlvd"] Dec 09 17:17:14 crc kubenswrapper[4840]: I1209 17:17:14.964566 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.001200 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvtn5" event={"ID":"27193ac5-22ed-4e67-b349-896bb73c6512","Type":"ContainerStarted","Data":"c63f74592524846134158381dc90bf5c6105fd307b8291e89b6dfe7590acd8e1"} Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.018820 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qv6wf" event={"ID":"7ff5b771-f400-4f66-9d95-9f66fff18a82","Type":"ContainerStarted","Data":"7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56"} Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.021896 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" event={"ID":"a59755b7-522b-477f-a8f5-6d8e55ca1730","Type":"ContainerStarted","Data":"c06c905203e299e4091a6c1e772020d35c719d76c5741b6f6206947596a1e44f"} Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.030527 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerStarted","Data":"cb1a3c61d39dea862f5e1df48256b9e4689a85e4415957ac0c37b6131eb10337"} Dec 09 17:17:15 crc kubenswrapper[4840]: W1209 17:17:15.113704 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97895c55_e758_4bd3_981c_2c9bd5eeabcb.slice/crio-037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3 WatchSource:0}: Error finding container 037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3: Status 404 returned error can't find the container with id 037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3 Dec 09 17:17:15 crc kubenswrapper[4840]: W1209 17:17:15.115085 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcede2dd2_02e6_4edd_9ba1_ed7c49df38a5.slice/crio-a6cacf2b2e20a73dcfcd4a403fd900b43834f0fea99a839b4e37df412f1c9b4d WatchSource:0}: Error finding container a6cacf2b2e20a73dcfcd4a403fd900b43834f0fea99a839b4e37df412f1c9b4d: Status 404 returned error can't find the container with id a6cacf2b2e20a73dcfcd4a403fd900b43834f0fea99a839b4e37df412f1c9b4d Dec 09 17:17:15 crc kubenswrapper[4840]: W1209 17:17:15.207556 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64c96895_a046_41da_83d1_5cb61d38de00.slice/crio-200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f 
WatchSource:0}: Error finding container 200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f: Status 404 returned error can't find the container with id 200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f
Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.218460 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4ts68"]
Dec 09 17:17:15 crc kubenswrapper[4840]: W1209 17:17:15.229108 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9144e8b_9235_4e97_83a5_7525f0986083.slice/crio-9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e WatchSource:0}: Error finding container 9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e: Status 404 returned error can't find the container with id 9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e
Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.232809 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-zthv5"]
Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.631956 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-f6mr5"]
Dec 09 17:17:15 crc kubenswrapper[4840]: W1209 17:17:15.655900 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6c5b609_3028_4ff9_9bf7_88fa13784f6a.slice/crio-f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc WatchSource:0}: Error finding container f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc: Status 404 returned error can't find the container with id f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc
Dec 09 17:17:15 crc kubenswrapper[4840]: I1209 17:17:15.906901 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.047439 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zthv5" event={"ID":"e9144e8b-9235-4e97-83a5-7525f0986083","Type":"ContainerStarted","Data":"9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.052273 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerStarted","Data":"75d48060e8cab7ec2cb1727c5465294b96d4b535243816d577622e4cc4be5e3e"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.052394 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerStarted","Data":"a6cacf2b2e20a73dcfcd4a403fd900b43834f0fea99a839b4e37df412f1c9b4d"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.080301 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" podUID="a59755b7-522b-477f-a8f5-6d8e55ca1730" containerName="init" containerID="cri-o://c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e" gracePeriod=10
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.087823 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4ts68" event={"ID":"64c96895-a046-41da-83d1-5cb61d38de00","Type":"ContainerStarted","Data":"200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.090858 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-f6mr5" event={"ID":"c6c5b609-3028-4ff9-9bf7-88fa13784f6a","Type":"ContainerStarted","Data":"f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.101161 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvtn5" event={"ID":"27193ac5-22ed-4e67-b349-896bb73c6512","Type":"ContainerStarted","Data":"1bcf87ff53df7a1fcef6a8c1425b5d1044fa761ab24bf2f2c82cd5d739dd0fcf"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.105980 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-frlvd" event={"ID":"97895c55-e758-4bd3-981c-2c9bd5eeabcb","Type":"ContainerStarted","Data":"037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3"}
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.616559 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j"
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.654547 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lvtn5" podStartSLOduration=3.654530235 podStartE2EDuration="3.654530235s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:16.149263345 +0000 UTC m=+1222.140373978" watchObservedRunningTime="2025-12-09 17:17:16.654530235 +0000 UTC m=+1222.645640868"
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.729268 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.729784 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.729902 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.730031 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.734861 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n6l2\" (UniqueName: \"kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.734993 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc\") pod \"a59755b7-522b-477f-a8f5-6d8e55ca1730\" (UID: \"a59755b7-522b-477f-a8f5-6d8e55ca1730\") "
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.761579 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2" (OuterVolumeSpecName: "kube-api-access-7n6l2") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "kube-api-access-7n6l2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.768666 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.774342 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.777528 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config" (OuterVolumeSpecName: "config") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.784453 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.797655 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a59755b7-522b-477f-a8f5-6d8e55ca1730" (UID: "a59755b7-522b-477f-a8f5-6d8e55ca1730"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837427 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837460 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837469 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837478 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837489 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59755b7-522b-477f-a8f5-6d8e55ca1730-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:16 crc kubenswrapper[4840]: I1209 17:17:16.837497 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n6l2\" (UniqueName: \"kubernetes.io/projected/a59755b7-522b-477f-a8f5-6d8e55ca1730-kube-api-access-7n6l2\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.119239 4840 generic.go:334] "Generic (PLEG): container finished" podID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerID="75d48060e8cab7ec2cb1727c5465294b96d4b535243816d577622e4cc4be5e3e" exitCode=0
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.119322 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerDied","Data":"75d48060e8cab7ec2cb1727c5465294b96d4b535243816d577622e4cc4be5e3e"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.124256 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerStarted","Data":"1ed673555c6478b1c9ef6c59fe021f6611cc83703e338ecb060c8581153048b5"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.124390 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4","Type":"ContainerStarted","Data":"475091edc79ed8d8eb986e673ed3446ffdd17294931d681f338172e728fb0cbe"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.129155 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jnkjx" event={"ID":"87c31a2f-f8da-4391-91b7-16544aceaf18","Type":"ContainerStarted","Data":"b1b0924cd3493f8478df6e91773d50a15973bd7df22a8c7365b6eae2e708f66b"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.135877 4840 generic.go:334] "Generic (PLEG): container finished" podID="a59755b7-522b-477f-a8f5-6d8e55ca1730" containerID="c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e" exitCode=0
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.136009 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" event={"ID":"a59755b7-522b-477f-a8f5-6d8e55ca1730","Type":"ContainerDied","Data":"c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.136028 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j" event={"ID":"a59755b7-522b-477f-a8f5-6d8e55ca1730","Type":"ContainerDied","Data":"c06c905203e299e4091a6c1e772020d35c719d76c5741b6f6206947596a1e44f"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.136045 4840 scope.go:117] "RemoveContainer" containerID="c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.136172 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-dhw7j"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.154292 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zthv5" event={"ID":"e9144e8b-9235-4e97-83a5-7525f0986083","Type":"ContainerStarted","Data":"f55e4bae8f11978444d2b44fef67fc83574388fbb8ae5e524673f04682ef2bff"}
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.216728 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-jnkjx" podStartSLOduration=4.372849606 podStartE2EDuration="36.216698782s" podCreationTimestamp="2025-12-09 17:16:41 +0000 UTC" firstStartedPulling="2025-12-09 17:16:42.49467742 +0000 UTC m=+1188.485788053" lastFinishedPulling="2025-12-09 17:17:14.338526596 +0000 UTC m=+1220.329637229" observedRunningTime="2025-12-09 17:17:17.216390014 +0000 UTC m=+1223.207500647" watchObservedRunningTime="2025-12-09 17:17:17.216698782 +0000 UTC m=+1223.207809415"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.234744 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=17.234723501 podStartE2EDuration="17.234723501s" podCreationTimestamp="2025-12-09 17:17:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:17.178085281 +0000 UTC m=+1223.169195934" watchObservedRunningTime="2025-12-09 17:17:17.234723501 +0000 UTC m=+1223.225834134"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.249296 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-zthv5" podStartSLOduration=4.24927449 podStartE2EDuration="4.24927449s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:17.234148395 +0000 UTC m=+1223.225259048" watchObservedRunningTime="2025-12-09 17:17:17.24927449 +0000 UTC m=+1223.240385123"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.370817 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"]
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.391892 4840 scope.go:117] "RemoveContainer" containerID="c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.392331 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-dhw7j"]
Dec 09 17:17:17 crc kubenswrapper[4840]: E1209 17:17:17.395722 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e\": container with ID starting with c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e not found: ID does not exist" containerID="c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e"
Dec 09 17:17:17 crc kubenswrapper[4840]: I1209 17:17:17.396334 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e"} err="failed to get container status \"c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e\": rpc error: code = NotFound desc = could not find container \"c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e\": container with ID starting with c7822fe5037628a8331316f51c0408e0e47d7ac640eb138eafe2ccfa73f3eb2e not found: ID does not exist"
Dec 09 17:17:18 crc kubenswrapper[4840]: I1209 17:17:18.173452 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerStarted","Data":"b4e1edc5396069c990d0b9ba62bb4dcc82964589008fccfd92b4c09dadd97eec"}
Dec 09 17:17:18 crc kubenswrapper[4840]: I1209 17:17:18.173800 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"
Dec 09 17:17:18 crc kubenswrapper[4840]: I1209 17:17:18.204731 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" podStartSLOduration=5.204469325 podStartE2EDuration="5.204469325s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:18.197958977 +0000 UTC m=+1224.189069610" watchObservedRunningTime="2025-12-09 17:17:18.204469325 +0000 UTC m=+1224.195579958"
Dec 09 17:17:18 crc kubenswrapper[4840]: I1209 17:17:18.631130 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a59755b7-522b-477f-a8f5-6d8e55ca1730" path="/var/lib/kubelet/pods/a59755b7-522b-477f-a8f5-6d8e55ca1730/volumes"
Dec 09 17:17:21 crc kubenswrapper[4840]: I1209 17:17:21.216555 4840 generic.go:334] "Generic (PLEG): container finished" podID="27193ac5-22ed-4e67-b349-896bb73c6512" containerID="1bcf87ff53df7a1fcef6a8c1425b5d1044fa761ab24bf2f2c82cd5d739dd0fcf" exitCode=0
Dec 09 17:17:21 crc kubenswrapper[4840]: I1209 17:17:21.216753 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvtn5" event={"ID":"27193ac5-22ed-4e67-b349-896bb73c6512","Type":"ContainerDied","Data":"1bcf87ff53df7a1fcef6a8c1425b5d1044fa761ab24bf2f2c82cd5d739dd0fcf"}
Dec 09 17:17:21 crc kubenswrapper[4840]: I1209 17:17:21.648164 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:24 crc kubenswrapper[4840]: I1209 17:17:24.221241 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"
Dec 09 17:17:24 crc kubenswrapper[4840]: I1209 17:17:24.298645 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"]
Dec 09 17:17:24 crc kubenswrapper[4840]: I1209 17:17:24.298918 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns" containerID="cri-o://badf747f4c324eb05e94248b4d2d4500b35c1e5287a088f8422e582c0fe04b0b" gracePeriod=10
Dec 09 17:17:25 crc kubenswrapper[4840]: I1209 17:17:25.279392 4840 generic.go:334] "Generic (PLEG): container finished" podID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerID="badf747f4c324eb05e94248b4d2d4500b35c1e5287a088f8422e582c0fe04b0b" exitCode=0
Dec 09 17:17:25 crc kubenswrapper[4840]: I1209 17:17:25.279471 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" event={"ID":"cc6d69d7-b580-4da6-8233-9e522a3674cd","Type":"ContainerDied","Data":"badf747f4c324eb05e94248b4d2d4500b35c1e5287a088f8422e582c0fe04b0b"}
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.193811 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lvtn5"
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.317395 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvtn5" event={"ID":"27193ac5-22ed-4e67-b349-896bb73c6512","Type":"ContainerDied","Data":"c63f74592524846134158381dc90bf5c6105fd307b8291e89b6dfe7590acd8e1"}
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.317443 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c63f74592524846134158381dc90bf5c6105fd307b8291e89b6dfe7590acd8e1"
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.317504 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lvtn5"
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371075 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371167 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6htp\" (UniqueName: \"kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371364 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371434 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371491 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.371541 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle\") pod \"27193ac5-22ed-4e67-b349-896bb73c6512\" (UID: \"27193ac5-22ed-4e67-b349-896bb73c6512\") "
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.383214 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts" (OuterVolumeSpecName: "scripts") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.383240 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.383303 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp" (OuterVolumeSpecName: "kube-api-access-w6htp") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "kube-api-access-w6htp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.384619 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.402224 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.404924 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data" (OuterVolumeSpecName: "config-data") pod "27193ac5-22ed-4e67-b349-896bb73c6512" (UID: "27193ac5-22ed-4e67-b349-896bb73c6512"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473849 4840 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-credential-keys\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473882 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473891 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473900 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473908 4840 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27193ac5-22ed-4e67-b349-896bb73c6512-fernet-keys\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:27 crc kubenswrapper[4840]: I1209 17:17:27.473919 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6htp\" (UniqueName: \"kubernetes.io/projected/27193ac5-22ed-4e67-b349-896bb73c6512-kube-api-access-w6htp\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.277222 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lvtn5"]
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.284714 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lvtn5"]
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.374887 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-fptt8"]
Dec 09 17:17:28 crc kubenswrapper[4840]: E1209 17:17:28.375365 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27193ac5-22ed-4e67-b349-896bb73c6512" containerName="keystone-bootstrap"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.375388 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="27193ac5-22ed-4e67-b349-896bb73c6512" containerName="keystone-bootstrap"
Dec 09 17:17:28 crc kubenswrapper[4840]: E1209 17:17:28.375412 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59755b7-522b-477f-a8f5-6d8e55ca1730" containerName="init"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.375419 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59755b7-522b-477f-a8f5-6d8e55ca1730" containerName="init"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.375618 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a59755b7-522b-477f-a8f5-6d8e55ca1730" containerName="init"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.375636 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="27193ac5-22ed-4e67-b349-896bb73c6512" containerName="keystone-bootstrap"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.376326 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.379308 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.379361 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ddftm"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.379906 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.382255 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.382819 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fptt8"]
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.496531 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.496589 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.496639 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.496666 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.497001 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.497036 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zlx6\" (UniqueName: \"kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599220 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599298 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zlx6\" (UniqueName: \"kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599392 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599423 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599461 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.599487 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.603842 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.604116 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.604269 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.604738 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.605586 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.617190 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zlx6\" (UniqueName: \"kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6\") pod \"keystone-bootstrap-fptt8\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.621211 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27193ac5-22ed-4e67-b349-896bb73c6512" path="/var/lib/kubelet/pods/27193ac5-22ed-4e67-b349-896bb73c6512/volumes"
Dec 09 17:17:28 crc kubenswrapper[4840]: I1209 17:17:28.715266 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fptt8"
Dec 09 17:17:30 crc kubenswrapper[4840]: I1209 17:17:30.788366 4840 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","pode90b0e97-f543-4d75-bb6d-8d96c7b3d663"] err="unable to destroy cgroup paths for cgroup [kubepods burstable pode90b0e97-f543-4d75-bb6d-8d96c7b3d663] : Timed out while waiting for systemd to remove kubepods-burstable-pode90b0e97_f543_4d75_bb6d_8d96c7b3d663.slice"
Dec 09 17:17:31 crc kubenswrapper[4840]: I1209 17:17:31.648355 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:31 crc kubenswrapper[4840]: I1209 17:17:31.658312 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:32 crc kubenswrapper[4840]: I1209 17:17:32.367370 4840 generic.go:334] "Generic (PLEG): container finished" podID="87c31a2f-f8da-4391-91b7-16544aceaf18" containerID="b1b0924cd3493f8478df6e91773d50a15973bd7df22a8c7365b6eae2e708f66b" exitCode=0
Dec 09 17:17:32 crc kubenswrapper[4840]: I1209 17:17:32.367468 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jnkjx" event={"ID":"87c31a2f-f8da-4391-91b7-16544aceaf18","Type":"ContainerDied","Data":"b1b0924cd3493f8478df6e91773d50a15973bd7df22a8c7365b6eae2e708f66b"}
Dec 09 17:17:32 crc kubenswrapper[4840]: I1209 17:17:32.373311 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Dec 09 17:17:33 crc kubenswrapper[4840]: I1209 17:17:33.103467 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.141:5353: i/o timeout"
Dec 09 17:17:37 crc kubenswrapper[4840]: I1209 17:17:37.418085 4840 generic.go:334] "Generic (PLEG): container finished" podID="e9144e8b-9235-4e97-83a5-7525f0986083" containerID="f55e4bae8f11978444d2b44fef67fc83574388fbb8ae5e524673f04682ef2bff" exitCode=0
Dec 09 17:17:37 crc kubenswrapper[4840]: I1209 17:17:37.418264 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zthv5" event={"ID":"e9144e8b-9235-4e97-83a5-7525f0986083","Type":"ContainerDied","Data":"f55e4bae8f11978444d2b44fef67fc83574388fbb8ae5e524673f04682ef2bff"}
Dec 09 17:17:38 crc kubenswrapper[4840]: I1209 17:17:38.105179 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.141:5353: i/o timeout"
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.626769 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.710657 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.710976 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.711038 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.711066 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.711105 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.711179 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xqnk\" (UniqueName: \"kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk\") pod \"cc6d69d7-b580-4da6-8233-9e522a3674cd\" (UID: \"cc6d69d7-b580-4da6-8233-9e522a3674cd\") "
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.717181 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk" (OuterVolumeSpecName: "kube-api-access-2xqnk") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "kube-api-access-2xqnk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.761433 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config" (OuterVolumeSpecName: "config") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.762435 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.765539 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.767274 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.780522 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cc6d69d7-b580-4da6-8233-9e522a3674cd" (UID: "cc6d69d7-b580-4da6-8233-9e522a3674cd"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815048 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815082 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815094 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xqnk\" (UniqueName: \"kubernetes.io/projected/cc6d69d7-b580-4da6-8233-9e522a3674cd-kube-api-access-2xqnk\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815106 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815117 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.815127 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc6d69d7-b580-4da6-8233-9e522a3674cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.966360 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jnkjx"
Dec 09 17:17:39 crc kubenswrapper[4840]: I1209 17:17:39.973754 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zthv5"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.017888 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config\") pod \"e9144e8b-9235-4e97-83a5-7525f0986083\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018060 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data\") pod \"87c31a2f-f8da-4391-91b7-16544aceaf18\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018150 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6sh6\" (UniqueName: \"kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6\") pod \"87c31a2f-f8da-4391-91b7-16544aceaf18\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018212 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data\") pod \"87c31a2f-f8da-4391-91b7-16544aceaf18\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018236 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle\") pod \"87c31a2f-f8da-4391-91b7-16544aceaf18\" (UID: \"87c31a2f-f8da-4391-91b7-16544aceaf18\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018259 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6d64\" (UniqueName: \"kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64\") pod \"e9144e8b-9235-4e97-83a5-7525f0986083\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.018336 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle\") pod \"e9144e8b-9235-4e97-83a5-7525f0986083\" (UID: \"e9144e8b-9235-4e97-83a5-7525f0986083\") "
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.022461 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6" (OuterVolumeSpecName: "kube-api-access-k6sh6") pod "87c31a2f-f8da-4391-91b7-16544aceaf18" (UID: "87c31a2f-f8da-4391-91b7-16544aceaf18"). InnerVolumeSpecName "kube-api-access-k6sh6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.022501 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "87c31a2f-f8da-4391-91b7-16544aceaf18" (UID: "87c31a2f-f8da-4391-91b7-16544aceaf18"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.022523 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64" (OuterVolumeSpecName: "kube-api-access-x6d64") pod "e9144e8b-9235-4e97-83a5-7525f0986083" (UID: "e9144e8b-9235-4e97-83a5-7525f0986083"). InnerVolumeSpecName "kube-api-access-x6d64". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.063620 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config" (OuterVolumeSpecName: "config") pod "e9144e8b-9235-4e97-83a5-7525f0986083" (UID: "e9144e8b-9235-4e97-83a5-7525f0986083"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.065010 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9144e8b-9235-4e97-83a5-7525f0986083" (UID: "e9144e8b-9235-4e97-83a5-7525f0986083"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.071047 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87c31a2f-f8da-4391-91b7-16544aceaf18" (UID: "87c31a2f-f8da-4391-91b7-16544aceaf18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.082339 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data" (OuterVolumeSpecName: "config-data") pod "87c31a2f-f8da-4391-91b7-16544aceaf18" (UID: "87c31a2f-f8da-4391-91b7-16544aceaf18"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121009 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121048 4840 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121063 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6sh6\" (UniqueName: \"kubernetes.io/projected/87c31a2f-f8da-4391-91b7-16544aceaf18-kube-api-access-k6sh6\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121076 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121087 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c31a2f-f8da-4391-91b7-16544aceaf18-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121098 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6d64\" (UniqueName: \"kubernetes.io/projected/e9144e8b-9235-4e97-83a5-7525f0986083-kube-api-access-x6d64\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.121107 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9144e8b-9235-4e97-83a5-7525f0986083-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.458238 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jnkjx" event={"ID":"87c31a2f-f8da-4391-91b7-16544aceaf18","Type":"ContainerDied","Data":"a2e843b771a8c53c52b993113aff139397c477ba2d0d570b5d5f4fc35a29b917"}
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.458290 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2e843b771a8c53c52b993113aff139397c477ba2d0d570b5d5f4fc35a29b917"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.458682 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jnkjx"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.460774 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.460794 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" event={"ID":"cc6d69d7-b580-4da6-8233-9e522a3674cd","Type":"ContainerDied","Data":"576df8ae96d2671208e606c453cfac71ade1c3af6a0c29287b0d6e5a04127511"}
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.460856 4840 scope.go:117] "RemoveContainer" containerID="badf747f4c324eb05e94248b4d2d4500b35c1e5287a088f8422e582c0fe04b0b"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.462448 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zthv5" event={"ID":"e9144e8b-9235-4e97-83a5-7525f0986083","Type":"ContainerDied","Data":"9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e"}
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.462484 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e0467913937259ccb483d9bf28f0b79d0fde01be5dab4c06699424922aa062e"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.462490 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zthv5"
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.502786 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"]
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.513000 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-vtjtl"]
Dec 09 17:17:40 crc kubenswrapper[4840]: I1209 17:17:40.626944 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" path="/var/lib/kubelet/pods/cc6d69d7-b580-4da6-8233-9e522a3674cd/volumes"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.126565 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.126718 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t5sfs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-qv6wf_openstack(7ff5b771-f400-4f66-9d95-9f66fff18a82): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.149442 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-qv6wf" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.298354 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6664c6795f-p7gdd"]
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.298778 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18" containerName="glance-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.298790 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18" containerName="glance-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.298805 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.298810 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.298829 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9144e8b-9235-4e97-83a5-7525f0986083" containerName="neutron-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.298835 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9144e8b-9235-4e97-83a5-7525f0986083" containerName="neutron-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.298843 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="init"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.298849 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="init"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.299049 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18" containerName="glance-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.299062 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.299076 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9144e8b-9235-4e97-83a5-7525f0986083" containerName="neutron-db-sync"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.300076 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.311265 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6664c6795f-p7gdd"]
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.348210 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"]
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.349655 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57dff6db4d-sszz8"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354283 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354332 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354360 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354376 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlrtr\" (UniqueName: \"kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354433 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.354451 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.359338 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.359511 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qtt8g"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.361418 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.361556 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.369582 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"]
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.432093 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6664c6795f-p7gdd"]
Dec 09 17:17:41 crc kubenswrapper[4840]: E1209
17:17:41.433017 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-nlrtr ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" podUID="6bb3f723-80b7-4caa-92fa-82389ab19bbb" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455794 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455838 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455870 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6mnm\" (UniqueName: \"kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455939 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455976 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.455995 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.456016 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.456032 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlrtr\" (UniqueName: \"kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc 
kubenswrapper[4840]: I1209 17:17:41.456072 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.456108 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.456128 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.456901 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.457468 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.457953 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.458140 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"] Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.459441 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.459094 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.461060 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.472092 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"] Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.482200 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.485680 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlrtr\" (UniqueName: \"kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr\") pod \"dnsmasq-dns-6664c6795f-p7gdd\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") " pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: E1209 17:17:41.486427 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-qv6wf" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.541768 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6664c6795f-p7gdd" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557380 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv4z6\" (UniqueName: \"kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557427 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557485 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557515 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557541 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557569 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557631 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557678 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557698 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557737 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.557766 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6mnm\" (UniqueName: \"kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.574097 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.574655 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.580857 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.590301 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config\") pod \"neutron-57dff6db4d-sszz8\" 
(UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.612807 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6mnm\" (UniqueName: \"kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm\") pod \"neutron-57dff6db4d-sszz8\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") " pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660505 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660574 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv4z6\" (UniqueName: \"kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660596 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660640 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.660753 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.661583 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.663263 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" 
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.663957 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.664775 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.676311 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.683004 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57dff6db4d-sszz8"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.705668 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv4z6\" (UniqueName: \"kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6\") pod \"dnsmasq-dns-5ccc5c4795-k2r4h\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") " pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763617 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763682 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763723 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763812 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763838 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlrtr\" (UniqueName: \"kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.763877 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc\") pod \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\" (UID: \"6bb3f723-80b7-4caa-92fa-82389ab19bbb\") "
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.764455 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config" (OuterVolumeSpecName: "config") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.764783 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.764992 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.765247 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.765602 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.785723 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr" (OuterVolumeSpecName: "kube-api-access-nlrtr") pod "6bb3f723-80b7-4caa-92fa-82389ab19bbb" (UID: "6bb3f723-80b7-4caa-92fa-82389ab19bbb"). InnerVolumeSpecName "kube-api-access-nlrtr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.824558 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865710 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865741 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlrtr\" (UniqueName: \"kubernetes.io/projected/6bb3f723-80b7-4caa-92fa-82389ab19bbb-kube-api-access-nlrtr\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865751 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865760 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865769 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:41 crc kubenswrapper[4840]: I1209 17:17:41.865776 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bb3f723-80b7-4caa-92fa-82389ab19bbb-config\") on node \"crc\" DevicePath \"\""
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.224819 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.226799 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.229136 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-sjvvc"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.229328 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.229381 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.246404 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374625 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374665 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374703 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374794 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374869 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h68wt\" (UniqueName: \"kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374897 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.374949 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477129 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477178 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477212 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477238 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477283 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h68wt\" (UniqueName: \"kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477302 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.477816 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.478040 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.481065 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.481112 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/38cd25422c2a393197855a396291af132e300524db65f2672b792068080a1237/globalmount\"" pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.483746 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.487567 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.494595 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h68wt\" (UniqueName: \"kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.494984 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.495052 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6664c6795f-p7gdd"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.518860 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.521009 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.523447 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.529545 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.529773 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.553233 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.588914 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6664c6795f-p7gdd"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.599513 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6664c6795f-p7gdd"]
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.637009 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bb3f723-80b7-4caa-92fa-82389ab19bbb" path="/var/lib/kubelet/pods/6bb3f723-80b7-4caa-92fa-82389ab19bbb/volumes"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688517 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688624 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688649 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t8wx\" (UniqueName: \"kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688680 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688710 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688757 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.688810 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.790788 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.790851 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t8wx\" (UniqueName: \"kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.790886 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.790913 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.790989 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.791069 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.791219 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.791711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.793276 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.795780 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.795815 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e28d7941ce676756577bb740fab8e92889d07f3d1c4bbddbbdbd9c7d965e46e3/globalmount\"" pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.796615 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.797052 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.798891 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.820225 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t8wx\" (UniqueName: \"kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:42 crc kubenswrapper[4840]: I1209 17:17:42.858637 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:43 crc kubenswrapper[4840]: I1209 17:17:43.105494 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-vtjtl" podUID="cc6d69d7-b580-4da6-8233-9e522a3674cd" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.141:5353: i/o timeout"
Dec 09 17:17:43 crc kubenswrapper[4840]: I1209 17:17:43.163760 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 17:17:45 crc kubenswrapper[4840]: I1209 17:17:45.901614 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 09 17:17:45 crc kubenswrapper[4840]: I1209 17:17:45.975293 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.156465 4840 scope.go:117] "RemoveContainer" containerID="d3f8bd56dbb7fe79ac20380adcc8b6b91d6346d5e9312a59d1c57072dea569a3"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.461710 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-768d86bb9c-skrvq"]
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.463850 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.466615 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.466624 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.489399 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-768d86bb9c-skrvq"]
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583207 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-ovndb-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583257 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583503 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-public-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583685 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-httpd-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583736 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-internal-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.583870 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-combined-ca-bundle\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.584008 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fnn9\" (UniqueName: \"kubernetes.io/projected/4ebfb39f-3df3-4538-b69e-5366dc52b442-kube-api-access-8fnn9\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.611250 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fptt8"]
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.685857 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-ovndb-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.685925 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.686048 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-public-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.686102 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-httpd-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.686130 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-internal-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.686165 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-combined-ca-bundle\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.686204 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fnn9\" (UniqueName: \"kubernetes.io/projected/4ebfb39f-3df3-4538-b69e-5366dc52b442-kube-api-access-8fnn9\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.692169 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-public-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.692899 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-combined-ca-bundle\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.693040 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-internal-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.693044 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.693089 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-httpd-config\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.710517 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ebfb39f-3df3-4538-b69e-5366dc52b442-ovndb-tls-certs\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.714469 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fnn9\" (UniqueName: \"kubernetes.io/projected/4ebfb39f-3df3-4538-b69e-5366dc52b442-kube-api-access-8fnn9\") pod \"neutron-768d86bb9c-skrvq\" (UID: \"4ebfb39f-3df3-4538-b69e-5366dc52b442\") " pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: I1209 17:17:47.794811 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-768d86bb9c-skrvq"
Dec 09 17:17:47 crc kubenswrapper[4840]: W1209 17:17:47.994952 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc WatchSource:0}: Error finding container 3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc: Status 404 returned error can't find the container with id 3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc
Dec 09 17:17:48 crc kubenswrapper[4840]: E1209 17:17:48.016511 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current"
Dec 09 17:17:48 crc kubenswrapper[4840]: E1209 17:17:48.016628 4840 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current"
Dec 09 17:17:48 crc kubenswrapper[4840]: E1209 17:17:48.016832 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pr76w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},
StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-f6mr5_openstack(c6c5b609-3028-4ff9-9bf7-88fa13784f6a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 17:17:48 crc kubenswrapper[4840]: E1209 17:17:48.019409 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cloudkitty-db-sync-f6mr5" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.570084 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerStarted","Data":"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"} Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.573531 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4ts68" event={"ID":"64c96895-a046-41da-83d1-5cb61d38de00","Type":"ContainerStarted","Data":"8156bb35fc59e93457f66565c135b52bec2b18d319f573bf1857ced938768472"} Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.577178 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fptt8" event={"ID":"18f6c52e-2e47-442e-80fe-a03f7b9582fe","Type":"ContainerStarted","Data":"fde18d4fe6a6418a8f526ffb3379ab1015673866948bd451ae9d5b5f640f2464"} Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.577218 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fptt8" event={"ID":"18f6c52e-2e47-442e-80fe-a03f7b9582fe","Type":"ContainerStarted","Data":"3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc"} Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.583223 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-frlvd" event={"ID":"97895c55-e758-4bd3-981c-2c9bd5eeabcb","Type":"ContainerStarted","Data":"2d3fd5a814ca00b805df2bf63f79a515f74ad4f54b17f51a56c606d04ca23ac0"} Dec 09 17:17:48 crc kubenswrapper[4840]: E1209 17:17:48.591356 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current\\\"\"" pod="openstack/cloudkitty-db-sync-f6mr5" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.633044 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-4ts68" podStartSLOduration=10.888822212000001 podStartE2EDuration="35.633023773s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="2025-12-09 17:17:15.209376751 +0000 UTC m=+1221.200487384" lastFinishedPulling="2025-12-09 17:17:39.953578312 +0000 UTC m=+1245.944688945" observedRunningTime="2025-12-09 17:17:48.600328146 +0000 UTC m=+1254.591438769" watchObservedRunningTime="2025-12-09 17:17:48.633023773 +0000 UTC m=+1254.624134406" Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.674225 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.701753 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"] Dec 09 17:17:48 
crc kubenswrapper[4840]: I1209 17:17:48.712409 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-fptt8" podStartSLOduration=20.712385839 podStartE2EDuration="20.712385839s" podCreationTimestamp="2025-12-09 17:17:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:48.656352467 +0000 UTC m=+1254.647463100" watchObservedRunningTime="2025-12-09 17:17:48.712385839 +0000 UTC m=+1254.703496472" Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.722148 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-frlvd" podStartSLOduration=6.295393067 podStartE2EDuration="35.722126873s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="2025-12-09 17:17:15.11765059 +0000 UTC m=+1221.108761223" lastFinishedPulling="2025-12-09 17:17:44.544384396 +0000 UTC m=+1250.535495029" observedRunningTime="2025-12-09 17:17:48.671469932 +0000 UTC m=+1254.662580565" watchObservedRunningTime="2025-12-09 17:17:48.722126873 +0000 UTC m=+1254.713237496" Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.814515 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:17:48 crc kubenswrapper[4840]: I1209 17:17:48.909001 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-768d86bb9c-skrvq"] Dec 09 17:17:48 crc kubenswrapper[4840]: W1209 17:17:48.924538 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ebfb39f_3df3_4538_b69e_5366dc52b442.slice/crio-4de6cac474646d0672eba5b2534fdc336ab7940cb6b9323a39fd47b504a10e77 WatchSource:0}: Error finding container 4de6cac474646d0672eba5b2534fdc336ab7940cb6b9323a39fd47b504a10e77: Status 404 returned error can't find the container with id 4de6cac474646d0672eba5b2534fdc336ab7940cb6b9323a39fd47b504a10e77 Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.422185 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"] Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.602789 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerStarted","Data":"381b796dea7edd3ade65db5d056823476df0245354470202e4a7e8239ed6ae1f"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.608233 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerStarted","Data":"57979c0662bdab1bdba4db7991a01c9932086f9cca3476b08c798ba1399dfb79"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.619326 4840 generic.go:334] "Generic (PLEG): container finished" podID="eae88523-ef33-475f-9d10-ad400eb13260" containerID="54ebec986f16a325cdd6a6c3c0c59bcd15e0ecd2927d667e60f736936e0ea2b5" exitCode=0 Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.619418 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" event={"ID":"eae88523-ef33-475f-9d10-ad400eb13260","Type":"ContainerDied","Data":"54ebec986f16a325cdd6a6c3c0c59bcd15e0ecd2927d667e60f736936e0ea2b5"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.619472 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" event={"ID":"eae88523-ef33-475f-9d10-ad400eb13260","Type":"ContainerStarted","Data":"115c61e6da2131fa7a33b5881b1366cc710ccefe7e73367b229cc501cb2c794b"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.623595 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerStarted","Data":"3061786023d6c9c30e936f58a302b442939a7b7617ccd3a485bf405b1d382cfe"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.628124 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-768d86bb9c-skrvq" event={"ID":"4ebfb39f-3df3-4538-b69e-5366dc52b442","Type":"ContainerStarted","Data":"0424798ece6d883d190c9f46f55dff7c6c4e56567b12ac9c6cb78aba17d5a828"} Dec 09 17:17:49 crc kubenswrapper[4840]: I1209 17:17:49.628160 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-768d86bb9c-skrvq" event={"ID":"4ebfb39f-3df3-4538-b69e-5366dc52b442","Type":"ContainerStarted","Data":"4de6cac474646d0672eba5b2534fdc336ab7940cb6b9323a39fd47b504a10e77"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.663211 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerStarted","Data":"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.682382 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerStarted","Data":"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.682442 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerStarted","Data":"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.682612 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-log" containerID="cri-o://4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" gracePeriod=30 Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.683389 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-httpd" containerID="cri-o://2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" gracePeriod=30 Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.695265 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" event={"ID":"eae88523-ef33-475f-9d10-ad400eb13260","Type":"ContainerStarted","Data":"369b3823746f5e78bbe3243adbb133079513a212da3e0c49b02c7d6dd605a323"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.695622 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.697450 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" 
event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerStarted","Data":"ff658c597a8102e9b2fe318077182e1f58c711b231db3f360ee95859b37969a8"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.710322 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-768d86bb9c-skrvq" event={"ID":"4ebfb39f-3df3-4538-b69e-5366dc52b442","Type":"ContainerStarted","Data":"613cd2f9f5ac87fb6efb30bec731ee9976af58cbd8d80fc7e0c7e8188e458dfe"} Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.710611 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-768d86bb9c-skrvq" Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.732093 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.732072456000001 podStartE2EDuration="10.732072456s" podCreationTimestamp="2025-12-09 17:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:51.704088461 +0000 UTC m=+1257.695199104" watchObservedRunningTime="2025-12-09 17:17:51.732072456 +0000 UTC m=+1257.723183079" Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.745247 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" podStartSLOduration=10.745232285 podStartE2EDuration="10.745232285s" podCreationTimestamp="2025-12-09 17:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:51.726649844 +0000 UTC m=+1257.717760487" watchObservedRunningTime="2025-12-09 17:17:51.745232285 +0000 UTC m=+1257.736342918" Dec 09 17:17:51 crc kubenswrapper[4840]: I1209 17:17:51.765075 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-768d86bb9c-skrvq" podStartSLOduration=4.765056061 podStartE2EDuration="4.765056061s" podCreationTimestamp="2025-12-09 17:17:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:51.746669326 +0000 UTC m=+1257.737779969" watchObservedRunningTime="2025-12-09 17:17:51.765056061 +0000 UTC m=+1257.756166694" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.270871 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.328892 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329043 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329165 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329397 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329441 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329489 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.329529 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t8wx\" (UniqueName: \"kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx\") pod \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\" (UID: \"5c3f6684-24bb-4d86-bd48-e0ffae114cb1\") " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.330296 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs" (OuterVolumeSpecName: "logs") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.330596 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.331331 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.366534 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts" (OuterVolumeSpecName: "scripts") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.372806 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx" (OuterVolumeSpecName: "kube-api-access-7t8wx") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "kube-api-access-7t8wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.376504 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc" (OuterVolumeSpecName: "glance") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "pvc-f0e7e026-17da-49b6-acf8-808958fa33bc". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.413171 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.423386 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data" (OuterVolumeSpecName: "config-data") pod "5c3f6684-24bb-4d86-bd48-e0ffae114cb1" (UID: "5c3f6684-24bb-4d86-bd48-e0ffae114cb1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432563 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t8wx\" (UniqueName: \"kubernetes.io/projected/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-kube-api-access-7t8wx\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432610 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432622 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432633 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432672 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") on node \"crc\" " Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.432686 4840 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5c3f6684-24bb-4d86-bd48-e0ffae114cb1-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.459509 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.459705 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f0e7e026-17da-49b6-acf8-808958fa33bc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc") on node "crc" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.534935 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.725908 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerStarted","Data":"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.729314 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerStarted","Data":"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731816 4840 generic.go:334] "Generic (PLEG): container finished" podID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerID="2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" exitCode=143 Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731841 4840 generic.go:334] "Generic (PLEG): container finished" podID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerID="4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" exitCode=143 Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731885 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerDied","Data":"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731890 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731905 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerDied","Data":"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731916 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5c3f6684-24bb-4d86-bd48-e0ffae114cb1","Type":"ContainerDied","Data":"57979c0662bdab1bdba4db7991a01c9932086f9cca3476b08c798ba1399dfb79"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.731933 4840 scope.go:117] "RemoveContainer" containerID="2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.735479 4840 generic.go:334] "Generic (PLEG): container finished" podID="18f6c52e-2e47-442e-80fe-a03f7b9582fe" containerID="fde18d4fe6a6418a8f526ffb3379ab1015673866948bd451ae9d5b5f640f2464" exitCode=0 Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.735534 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fptt8" event={"ID":"18f6c52e-2e47-442e-80fe-a03f7b9582fe","Type":"ContainerDied","Data":"fde18d4fe6a6418a8f526ffb3379ab1015673866948bd451ae9d5b5f640f2464"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.741428 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerStarted","Data":"128af6f61276d2d10b57801721898ca87045fb44e4521f15733a7803df20a9e3"} Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.759826 4840 scope.go:117] "RemoveContainer" containerID="4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.784698 4840 scope.go:117] "RemoveContainer" containerID="2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" Dec 09 17:17:52 crc kubenswrapper[4840]: E1209 17:17:52.785403 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773\": container with ID starting with 2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773 not found: ID does not exist" containerID="2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.785441 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773"} err="failed to get container status \"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773\": rpc error: code = NotFound desc = could not find container \"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773\": container with ID starting with 2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773 not found: ID does not exist" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.785468 4840 scope.go:117] "RemoveContainer" containerID="4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" Dec 09 17:17:52 crc kubenswrapper[4840]: E1209 17:17:52.785901 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816\": container with ID starting with 4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816 not found: ID does not exist" containerID="4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.785990 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816"} err="failed to get container status \"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816\": rpc error: code = NotFound desc = could not find container \"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816\": container with ID starting with 4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816 not found: ID does not exist" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.786017 4840 scope.go:117] "RemoveContainer" containerID="2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.786535 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773"} err="failed to get container status \"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773\": rpc error: code = NotFound desc = could not find container \"2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773\": container with ID starting with 2ed4f4890d36d5cfd0ebfbf8f0f07918ada0ad3b1a33decb2cdd57433aa78773 not found: ID does not exist" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.786555 4840 scope.go:117] "RemoveContainer" containerID="4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.787050 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816"} err="failed to get container status \"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816\": rpc error: code = NotFound desc = could not find container \"4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816\": container with ID starting with 4c3b6ae61f9383daa1d30447abff2cd990bb72727f408c3b413039e4f8c1f816 not found: ID does not exist" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.798050 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.807259 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816184 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816473 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-57dff6db4d-sszz8" podStartSLOduration=11.816455728 podStartE2EDuration="11.816455728s" podCreationTimestamp="2025-12-09 17:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:52.788235457 +0000 UTC m=+1258.779346090" watchObservedRunningTime="2025-12-09 17:17:52.816455728 +0000 UTC m=+1258.807566361" Dec 09 17:17:52 crc kubenswrapper[4840]: E1209 17:17:52.816629 4840 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-log" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816642 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-log" Dec 09 17:17:52 crc kubenswrapper[4840]: E1209 17:17:52.816662 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-httpd" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816668 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-httpd" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816861 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-log" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.816880 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" containerName="glance-httpd" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.817927 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.820772 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.829827 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.840340 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.944533 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.944659 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87m5l\" (UniqueName: \"kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.944749 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.944835 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 
17:17:52.944991 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.945032 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.945052 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:52 crc kubenswrapper[4840]: I1209 17:17:52.945117 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046342 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046398 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046413 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046438 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046466 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046518 
4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87m5l\" (UniqueName: \"kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046549 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046600 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.046897 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.047176 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.055761 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.058808 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.058882 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.059311 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.059334 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e28d7941ce676756577bb740fab8e92889d07f3d1c4bbddbbdbd9c7d965e46e3/globalmount\"" pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.062413 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.087439 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87m5l\" (UniqueName: \"kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.103427 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.144388 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:17:53 crc kubenswrapper[4840]: W1209 17:17:53.724104 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod959b2112_8599_463a_8f23_913766ab1b4e.slice/crio-3149cc9e5d10bc6cb9820f1a3bf4ebf8f7dd2972dae4b61b214c9f38621967bc WatchSource:0}: Error finding container 3149cc9e5d10bc6cb9820f1a3bf4ebf8f7dd2972dae4b61b214c9f38621967bc: Status 404 returned error can't find the container with id 3149cc9e5d10bc6cb9820f1a3bf4ebf8f7dd2972dae4b61b214c9f38621967bc Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.724670 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.763407 4840 generic.go:334] "Generic (PLEG): container finished" podID="64c96895-a046-41da-83d1-5cb61d38de00" containerID="8156bb35fc59e93457f66565c135b52bec2b18d319f573bf1857ced938768472" exitCode=0 Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.763507 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4ts68" event={"ID":"64c96895-a046-41da-83d1-5cb61d38de00","Type":"ContainerDied","Data":"8156bb35fc59e93457f66565c135b52bec2b18d319f573bf1857ced938768472"} Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.769537 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerStarted","Data":"3149cc9e5d10bc6cb9820f1a3bf4ebf8f7dd2972dae4b61b214c9f38621967bc"} Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.783798 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-log" containerID="cri-o://423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" gracePeriod=30 Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.783857 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.784189 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-httpd" containerID="cri-o://0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" gracePeriod=30 Dec 09 17:17:53 crc kubenswrapper[4840]: I1209 17:17:53.819604 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.819583851 podStartE2EDuration="12.819583851s" podCreationTimestamp="2025-12-09 17:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:53.813010367 +0000 UTC m=+1259.804121000" watchObservedRunningTime="2025-12-09 17:17:53.819583851 +0000 UTC m=+1259.810694484" Dec 09 17:17:54 crc kubenswrapper[4840]: E1209 17:17:54.001259 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b94e8f2_feeb_45a5_8716_e0364b9662eb.slice/crio-423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d.scope\": RecentStats: unable to find data in memory cache]" Dec 09 17:17:54 
crc kubenswrapper[4840]: I1209 17:17:54.316043 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fptt8" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400292 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400591 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400627 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zlx6\" (UniqueName: \"kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400688 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400861 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.400891 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys\") pod \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\" (UID: \"18f6c52e-2e47-442e-80fe-a03f7b9582fe\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.406515 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.407487 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts" (OuterVolumeSpecName: "scripts") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.407521 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.413900 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6" (OuterVolumeSpecName: "kube-api-access-9zlx6") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "kube-api-access-9zlx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.435635 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data" (OuterVolumeSpecName: "config-data") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.438827 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18f6c52e-2e47-442e-80fe-a03f7b9582fe" (UID: "18f6c52e-2e47-442e-80fe-a03f7b9582fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.503916 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.503948 4840 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.503995 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.504007 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.504015 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zlx6\" (UniqueName: \"kubernetes.io/projected/18f6c52e-2e47-442e-80fe-a03f7b9582fe-kube-api-access-9zlx6\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.504026 4840 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18f6c52e-2e47-442e-80fe-a03f7b9582fe-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.594748 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.644810 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3f6684-24bb-4d86-bd48-e0ffae114cb1" path="/var/lib/kubelet/pods/5c3f6684-24bb-4d86-bd48-e0ffae114cb1/volumes" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709066 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709345 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h68wt\" (UniqueName: \"kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709457 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709506 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709542 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709612 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709636 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run\") pod \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\" (UID: \"6b94e8f2-feeb-45a5-8716-e0364b9662eb\") " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.709990 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.710142 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs" (OuterVolumeSpecName: "logs") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.711202 4840 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.711220 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b94e8f2-feeb-45a5-8716-e0364b9662eb-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.716247 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt" (OuterVolumeSpecName: "kube-api-access-h68wt") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "kube-api-access-h68wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.719704 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts" (OuterVolumeSpecName: "scripts") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.725636 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56" (OuterVolumeSpecName: "glance") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.736267 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.761424 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data" (OuterVolumeSpecName: "config-data") pod "6b94e8f2-feeb-45a5-8716-e0364b9662eb" (UID: "6b94e8f2-feeb-45a5-8716-e0364b9662eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.813403 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fptt8" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.814351 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.814374 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.814383 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h68wt\" (UniqueName: \"kubernetes.io/projected/6b94e8f2-feeb-45a5-8716-e0364b9662eb-kube-api-access-h68wt\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.814409 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") on node \"crc\" " Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.814419 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b94e8f2-feeb-45a5-8716-e0364b9662eb-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.815303 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fptt8" event={"ID":"18f6c52e-2e47-442e-80fe-a03f7b9582fe","Type":"ContainerDied","Data":"3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.815335 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.818243 4840 generic.go:334] "Generic (PLEG): container finished" podID="97895c55-e758-4bd3-981c-2c9bd5eeabcb" containerID="2d3fd5a814ca00b805df2bf63f79a515f74ad4f54b17f51a56c606d04ca23ac0" exitCode=0 Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.818313 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-frlvd" event={"ID":"97895c55-e758-4bd3-981c-2c9bd5eeabcb","Type":"ContainerDied","Data":"2d3fd5a814ca00b805df2bf63f79a515f74ad4f54b17f51a56c606d04ca23ac0"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.822522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerStarted","Data":"67e53271296fc29a725fea28590583942bc9bfa4001eb05667e60db577c26c08"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.824898 4840 generic.go:334] "Generic (PLEG): container finished" podID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerID="0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" exitCode=0 Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.824927 4840 generic.go:334] "Generic (PLEG): container finished" podID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerID="423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" exitCode=143 Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.824976 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerDied","Data":"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.825004 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerDied","Data":"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.825016 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6b94e8f2-feeb-45a5-8716-e0364b9662eb","Type":"ContainerDied","Data":"381b796dea7edd3ade65db5d056823476df0245354470202e4a7e8239ed6ae1f"} Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.825033 4840 scope.go:117] "RemoveContainer" containerID="0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.825192 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.846306 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.846445 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56") on node "crc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.898500 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.916272 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.922145 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939124 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:17:54 crc kubenswrapper[4840]: E1209 17:17:54.939629 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-log" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939646 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-log" Dec 09 17:17:54 crc kubenswrapper[4840]: E1209 17:17:54.939660 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-httpd" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939668 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-httpd" Dec 09 17:17:54 crc kubenswrapper[4840]: E1209 17:17:54.939701 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18f6c52e-2e47-442e-80fe-a03f7b9582fe" containerName="keystone-bootstrap" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939710 4840 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="18f6c52e-2e47-442e-80fe-a03f7b9582fe" containerName="keystone-bootstrap" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939915 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="18f6c52e-2e47-442e-80fe-a03f7b9582fe" containerName="keystone-bootstrap" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.939945 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-httpd" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.940001 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" containerName="glance-log" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.941303 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.944538 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.944860 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.961320 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-86649f76d6-p6jhc"] Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.962732 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.966139 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ddftm" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.966382 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.968195 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.968349 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.968382 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.968386 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 17:17:54 crc kubenswrapper[4840]: I1209 17:17:54.976453 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019725 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019782 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-combined-ca-bundle\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc 
kubenswrapper[4840]: I1209 17:17:55.019839 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019866 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019907 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4llls\" (UniqueName: \"kubernetes.io/projected/8dc145cc-e506-4686-8e22-c881d8fc079f-kube-api-access-4llls\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019929 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-internal-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.019974 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020373 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-credential-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020415 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020484 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-public-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncfb5\" (UniqueName: 
\"kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020533 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-config-data\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020547 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-fernet-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020579 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-scripts\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020597 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.020634 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.025876 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-86649f76d6-p6jhc"] Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122090 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-credential-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122449 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122521 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-public-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 
17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122556 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncfb5\" (UniqueName: \"kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122585 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-config-data\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122606 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-fernet-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122639 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-scripts\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122667 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122771 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-combined-ca-bundle\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122814 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122838 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122876 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4llls\" (UniqueName: \"kubernetes.io/projected/8dc145cc-e506-4686-8e22-c881d8fc079f-kube-api-access-4llls\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122903 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-internal-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.122936 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.125324 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.125614 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.133023 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-combined-ca-bundle\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.142871 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-credential-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.151806 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.151846 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/38cd25422c2a393197855a396291af132e300524db65f2672b792068080a1237/globalmount\"" pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.155485 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.156656 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.159703 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-internal-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.160003 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-fernet-keys\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.162415 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-public-tls-certs\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.167539 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-scripts\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.168086 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc145cc-e506-4686-8e22-c881d8fc079f-config-data\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.173711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " 
pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.174129 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.178487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4llls\" (UniqueName: \"kubernetes.io/projected/8dc145cc-e506-4686-8e22-c881d8fc079f-kube-api-access-4llls\") pod \"keystone-86649f76d6-p6jhc\" (UID: \"8dc145cc-e506-4686-8e22-c881d8fc079f\") " pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.243052 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncfb5\" (UniqueName: \"kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.298312 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.298611 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.563007 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.846108 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerStarted","Data":"cd11d777ab731590b573139b2ee8a9010d67ab18f1910043d9dda367457ab892"} Dec 09 17:17:55 crc kubenswrapper[4840]: I1209 17:17:55.881484 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.881465697 podStartE2EDuration="3.881465697s" podCreationTimestamp="2025-12-09 17:17:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:17:55.872436843 +0000 UTC m=+1261.863547486" watchObservedRunningTime="2025-12-09 17:17:55.881465697 +0000 UTC m=+1261.872576330" Dec 09 17:17:56 crc kubenswrapper[4840]: I1209 17:17:56.637113 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b94e8f2-feeb-45a5-8716-e0364b9662eb" path="/var/lib/kubelet/pods/6b94e8f2-feeb-45a5-8716-e0364b9662eb/volumes" Dec 09 17:17:56 crc kubenswrapper[4840]: I1209 17:17:56.829144 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:17:56 crc kubenswrapper[4840]: I1209 17:17:56.889476 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:17:56 crc kubenswrapper[4840]: I1209 17:17:56.889718 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" containerID="cri-o://b4e1edc5396069c990d0b9ba62bb4dcc82964589008fccfd92b4c09dadd97eec" gracePeriod=10 Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.479288 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.576850 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwxv7\" (UniqueName: \"kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7\") pod \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.577093 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data\") pod \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.577177 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle\") pod \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\" (UID: \"97895c55-e758-4bd3-981c-2c9bd5eeabcb\") " Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.598407 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7" (OuterVolumeSpecName: "kube-api-access-fwxv7") pod "97895c55-e758-4bd3-981c-2c9bd5eeabcb" (UID: "97895c55-e758-4bd3-981c-2c9bd5eeabcb"). 
InnerVolumeSpecName "kube-api-access-fwxv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.598311 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "97895c55-e758-4bd3-981c-2c9bd5eeabcb" (UID: "97895c55-e758-4bd3-981c-2c9bd5eeabcb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.610387 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97895c55-e758-4bd3-981c-2c9bd5eeabcb" (UID: "97895c55-e758-4bd3-981c-2c9bd5eeabcb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.679274 4840 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.679315 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97895c55-e758-4bd3-981c-2c9bd5eeabcb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.679329 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwxv7\" (UniqueName: \"kubernetes.io/projected/97895c55-e758-4bd3-981c-2c9bd5eeabcb-kube-api-access-fwxv7\") on node \"crc\" DevicePath \"\"" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.916700 4840 generic.go:334] "Generic (PLEG): container finished" podID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerID="b4e1edc5396069c990d0b9ba62bb4dcc82964589008fccfd92b4c09dadd97eec" exitCode=0 Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.916770 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerDied","Data":"b4e1edc5396069c990d0b9ba62bb4dcc82964589008fccfd92b4c09dadd97eec"} Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.918208 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-frlvd" event={"ID":"97895c55-e758-4bd3-981c-2c9bd5eeabcb","Type":"ContainerDied","Data":"037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3"} Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.918232 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="037a10c81a512060709a8ae57ce47505e12ee21bf54d3bd3c5d8d291add8ceb3" Dec 09 17:17:57 crc kubenswrapper[4840]: I1209 17:17:57.918283 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-frlvd" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.772405 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7d74754bb4-694mt"] Dec 09 17:17:58 crc kubenswrapper[4840]: E1209 17:17:58.772797 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97895c55-e758-4bd3-981c-2c9bd5eeabcb" containerName="barbican-db-sync" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.772814 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="97895c55-e758-4bd3-981c-2c9bd5eeabcb" containerName="barbican-db-sync" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.773021 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="97895c55-e758-4bd3-981c-2c9bd5eeabcb" containerName="barbican-db-sync" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.774177 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.775625 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.784330 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-klcfc" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.784544 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.818630 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7d74754bb4-694mt"] Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.838250 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-8455558bd7-bssg9"] Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.839772 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.855878 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.883866 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-8455558bd7-bssg9"] Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.903372 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qljz4\" (UniqueName: \"kubernetes.io/projected/84560bb3-a93c-4016-a341-e4c3cba8651e-kube-api-access-qljz4\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.903808 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84560bb3-a93c-4016-a341-e4c3cba8651e-logs\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.903878 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.903928 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data-custom\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.939275 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-combined-ca-bundle\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.962854 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:17:58 crc kubenswrapper[4840]: I1209 17:17:58.973473 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041218 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041277 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data-custom\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041310 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-combined-ca-bundle\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041349 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041384 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hjwd\" (UniqueName: \"kubernetes.io/projected/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-kube-api-access-4hjwd\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041403 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nknjq\" (UniqueName: \"kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041443 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qljz4\" (UniqueName: \"kubernetes.io/projected/84560bb3-a93c-4016-a341-e4c3cba8651e-kube-api-access-qljz4\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041473 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041490 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-combined-ca-bundle\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041541 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data-custom\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041575 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041601 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84560bb3-a93c-4016-a341-e4c3cba8651e-logs\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041626 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-logs\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.041646 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.042946 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.043632 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84560bb3-a93c-4016-a341-e4c3cba8651e-logs\") pod 
\"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.048371 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-combined-ca-bundle\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.048858 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.071105 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.073087 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.073208 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qljz4\" (UniqueName: \"kubernetes.io/projected/84560bb3-a93c-4016-a341-e4c3cba8651e-kube-api-access-qljz4\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.080629 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.083928 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84560bb3-a93c-4016-a341-e4c3cba8651e-config-data-custom\") pod \"barbican-keystone-listener-7d74754bb4-694mt\" (UID: \"84560bb3-a93c-4016-a341-e4c3cba8651e\") " pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.091173 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.116099 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.143552 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.143612 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.143668 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hjwd\" (UniqueName: \"kubernetes.io/projected/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-kube-api-access-4hjwd\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.143699 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nknjq\" (UniqueName: \"kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.143754 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144014 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144060 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144085 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144222 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-combined-ca-bundle\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: 
\"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144286 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data-custom\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144352 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144424 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144490 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-logs\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144528 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144555 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngc9s\" (UniqueName: \"kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144579 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144756 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.144884 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" 
(UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.145435 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-logs\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.145561 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.146313 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.148511 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.148782 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data-custom\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.151101 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-config-data\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.155093 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-combined-ca-bundle\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.165593 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hjwd\" (UniqueName: \"kubernetes.io/projected/bef7e9a8-ed8b-477e-a9f8-329dda25e45c-kube-api-access-4hjwd\") pod \"barbican-worker-8455558bd7-bssg9\" (UID: \"bef7e9a8-ed8b-477e-a9f8-329dda25e45c\") " pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.167784 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nknjq\" (UniqueName: \"kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq\") pod \"dnsmasq-dns-688c87cc99-7c7v9\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " 
pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.220106 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.158:5353: connect: connection refused" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.245673 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.245744 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.245766 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngc9s\" (UniqueName: \"kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.245855 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.245896 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.246465 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.249990 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.251426 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.260764 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.268667 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngc9s\" (UniqueName: \"kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s\") pod \"barbican-api-7bfb47b7b8-d27mm\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.313768 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.458401 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-8455558bd7-bssg9" Dec 09 17:17:59 crc kubenswrapper[4840]: I1209 17:17:59.548044 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.150136 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7c5899b866-c7lp6"] Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.152370 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.154460 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.159052 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.160750 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c5899b866-c7lp6"] Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.201806 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.201850 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275eb97a-385a-428b-8635-e31d1e4def98-logs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.201875 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data-custom\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.201905 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5924x\" (UniqueName: 
\"kubernetes.io/projected/275eb97a-385a-428b-8635-e31d1e4def98-kube-api-access-5924x\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.202282 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-combined-ca-bundle\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.202346 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-internal-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.202371 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-public-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.303666 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-combined-ca-bundle\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.304644 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-internal-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.304750 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-public-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.304907 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.305004 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275eb97a-385a-428b-8635-e31d1e4def98-logs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.305093 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data-custom\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.305173 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5924x\" (UniqueName: \"kubernetes.io/projected/275eb97a-385a-428b-8635-e31d1e4def98-kube-api-access-5924x\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.305748 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275eb97a-385a-428b-8635-e31d1e4def98-logs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.310775 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data-custom\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.311092 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-config-data\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.313874 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-internal-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.316293 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-public-tls-certs\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.317037 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275eb97a-385a-428b-8635-e31d1e4def98-combined-ca-bundle\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.330693 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5924x\" (UniqueName: \"kubernetes.io/projected/275eb97a-385a-428b-8635-e31d1e4def98-kube-api-access-5924x\") pod \"barbican-api-7c5899b866-c7lp6\" (UID: \"275eb97a-385a-428b-8635-e31d1e4def98\") " pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:02 crc kubenswrapper[4840]: I1209 17:18:02.474458 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.144866 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.145217 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.184163 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.202492 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.982528 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:03 crc kubenswrapper[4840]: I1209 17:18:03.982578 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:04 crc kubenswrapper[4840]: I1209 17:18:04.036354 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:18:04 crc kubenswrapper[4840]: I1209 17:18:04.036448 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:18:04 crc kubenswrapper[4840]: I1209 17:18:04.220586 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.158:5353: connect: connection refused" Dec 09 17:18:04 crc kubenswrapper[4840]: E1209 17:18:04.306508 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc\": RecentStats: unable to find data in memory cache]" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.235649 4840 scope.go:117] "RemoveContainer" containerID="423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.375445 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4ts68" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.417864 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzpk9\" (UniqueName: \"kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9\") pod \"64c96895-a046-41da-83d1-5cb61d38de00\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.418186 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs\") pod \"64c96895-a046-41da-83d1-5cb61d38de00\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.418236 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle\") pod \"64c96895-a046-41da-83d1-5cb61d38de00\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.418312 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data\") pod \"64c96895-a046-41da-83d1-5cb61d38de00\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.418439 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts\") pod \"64c96895-a046-41da-83d1-5cb61d38de00\" (UID: \"64c96895-a046-41da-83d1-5cb61d38de00\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.420482 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs" (OuterVolumeSpecName: "logs") pod "64c96895-a046-41da-83d1-5cb61d38de00" (UID: "64c96895-a046-41da-83d1-5cb61d38de00"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.435655 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9" (OuterVolumeSpecName: "kube-api-access-mzpk9") pod "64c96895-a046-41da-83d1-5cb61d38de00" (UID: "64c96895-a046-41da-83d1-5cb61d38de00"). InnerVolumeSpecName "kube-api-access-mzpk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.442198 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts" (OuterVolumeSpecName: "scripts") pod "64c96895-a046-41da-83d1-5cb61d38de00" (UID: "64c96895-a046-41da-83d1-5cb61d38de00"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.454582 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data" (OuterVolumeSpecName: "config-data") pod "64c96895-a046-41da-83d1-5cb61d38de00" (UID: "64c96895-a046-41da-83d1-5cb61d38de00"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.460004 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64c96895-a046-41da-83d1-5cb61d38de00" (UID: "64c96895-a046-41da-83d1-5cb61d38de00"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.521180 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c96895-a046-41da-83d1-5cb61d38de00-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.521214 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.521225 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.521234 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c96895-a046-41da-83d1-5cb61d38de00-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.521243 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzpk9\" (UniqueName: \"kubernetes.io/projected/64c96895-a046-41da-83d1-5cb61d38de00-kube-api-access-mzpk9\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.571233 4840 scope.go:117] "RemoveContainer" containerID="0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" Dec 09 17:18:07 crc kubenswrapper[4840]: E1209 17:18:07.571691 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336\": container with ID starting with 0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336 not found: ID does not exist" containerID="0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.571736 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336"} err="failed to get container status \"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336\": rpc error: code = NotFound desc = could not find container \"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336\": container with ID starting with 0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336 not found: ID does not exist" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.571766 4840 scope.go:117] "RemoveContainer" containerID="423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" Dec 09 17:18:07 crc kubenswrapper[4840]: E1209 17:18:07.572379 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d\": container with ID starting with 
423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d not found: ID does not exist" containerID="423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.572434 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d"} err="failed to get container status \"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d\": rpc error: code = NotFound desc = could not find container \"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d\": container with ID starting with 423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d not found: ID does not exist" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.572460 4840 scope.go:117] "RemoveContainer" containerID="0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.573129 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336"} err="failed to get container status \"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336\": rpc error: code = NotFound desc = could not find container \"0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336\": container with ID starting with 0f449a8f450c42e08e4f8e8f7615c6fc8396da9fe7ad7556240e693c14166336 not found: ID does not exist" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.573148 4840 scope.go:117] "RemoveContainer" containerID="423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.573411 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d"} err="failed to get container status \"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d\": rpc error: code = NotFound desc = could not find container \"423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d\": container with ID starting with 423dd3446c7595163030ab9a2d8bb22e5fb8a32c73ee794dadacda3bd1235b3d not found: ID does not exist" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.742143 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.830703 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.830752 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.830806 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grffj\" (UniqueName: \"kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.830834 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.830940 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.831033 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0\") pod \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\" (UID: \"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5\") " Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.853261 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj" (OuterVolumeSpecName: "kube-api-access-grffj") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "kube-api-access-grffj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.933527 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grffj\" (UniqueName: \"kubernetes.io/projected/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-kube-api-access-grffj\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:07 crc kubenswrapper[4840]: I1209 17:18:07.981056 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.002803 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.034752 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.035073 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.035176 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.035289 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4ts68" event={"ID":"64c96895-a046-41da-83d1-5cb61d38de00","Type":"ContainerDied","Data":"200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f"} Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.035321 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="200d921f9ad943440a48e70cc08a796f8dea939b5fbb5641adc8117b4f32378f" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.035329 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4ts68" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.038641 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" event={"ID":"cede2dd2-02e6-4edd-9ba1-ed7c49df38a5","Type":"ContainerDied","Data":"a6cacf2b2e20a73dcfcd4a403fd900b43834f0fea99a839b4e37df412f1c9b4d"} Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.038707 4840 scope.go:117] "RemoveContainer" containerID="b4e1edc5396069c990d0b9ba62bb4dcc82964589008fccfd92b4c09dadd97eec" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.038730 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-ps8ql" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.073176 4840 scope.go:117] "RemoveContainer" containerID="75d48060e8cab7ec2cb1727c5465294b96d4b535243816d577622e4cc4be5e3e" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.080348 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.085548 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config" (OuterVolumeSpecName: "config") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.093488 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" (UID: "cede2dd2-02e6-4edd-9ba1-ed7c49df38a5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.105246 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-86649f76d6-p6jhc"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.137420 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.137459 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.137473 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.221466 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.221756 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.333664 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.427073 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.447255 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-ps8ql"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.512001 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7d74754bb4-694mt"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.546130 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5df4f579dd-7gd8n"] Dec 09 17:18:08 crc kubenswrapper[4840]: E1209 17:18:08.563231 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="init" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.563287 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="init" Dec 09 17:18:08 crc kubenswrapper[4840]: E1209 17:18:08.563313 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64c96895-a046-41da-83d1-5cb61d38de00" 
containerName="placement-db-sync" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.563324 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="64c96895-a046-41da-83d1-5cb61d38de00" containerName="placement-db-sync" Dec 09 17:18:08 crc kubenswrapper[4840]: E1209 17:18:08.563374 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.563380 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.563740 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" containerName="dnsmasq-dns" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.563771 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="64c96895-a046-41da-83d1-5cb61d38de00" containerName="placement-db-sync" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.567583 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.567623 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c5899b866-c7lp6"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.576550 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.582065 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5df4f579dd-7gd8n"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.582257 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.582489 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nh9m5" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.587283 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.587528 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.587674 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.598268 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-8455558bd7-bssg9"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.658347 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-scripts\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.658774 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-internal-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 
17:18:08.658848 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-config-data\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.658904 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-725qk\" (UniqueName: \"kubernetes.io/projected/ccffcd1a-4659-4005-abd2-ae99de7f74d1-kube-api-access-725qk\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.658943 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccffcd1a-4659-4005-abd2-ae99de7f74d1-logs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.658368 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cede2dd2-02e6-4edd-9ba1-ed7c49df38a5" path="/var/lib/kubelet/pods/cede2dd2-02e6-4edd-9ba1-ed7c49df38a5/volumes" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.659019 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-public-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.659139 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-combined-ca-bundle\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.667650 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.760524 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-combined-ca-bundle\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.760576 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-scripts\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.760864 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-internal-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc 
kubenswrapper[4840]: I1209 17:18:08.761562 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-config-data\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.761641 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-725qk\" (UniqueName: \"kubernetes.io/projected/ccffcd1a-4659-4005-abd2-ae99de7f74d1-kube-api-access-725qk\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.761687 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccffcd1a-4659-4005-abd2-ae99de7f74d1-logs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.761730 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-public-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.762176 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccffcd1a-4659-4005-abd2-ae99de7f74d1-logs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.772906 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-internal-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.774045 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-config-data\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.775949 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-scripts\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.776149 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-public-tls-certs\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.776195 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ccffcd1a-4659-4005-abd2-ae99de7f74d1-combined-ca-bundle\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.780805 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-725qk\" (UniqueName: \"kubernetes.io/projected/ccffcd1a-4659-4005-abd2-ae99de7f74d1-kube-api-access-725qk\") pod \"placement-5df4f579dd-7gd8n\" (UID: \"ccffcd1a-4659-4005-abd2-ae99de7f74d1\") " pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:08 crc kubenswrapper[4840]: I1209 17:18:08.901851 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.064323 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" event={"ID":"489b3fb9-e4f8-40d7-ab9f-590a1c482235","Type":"ContainerStarted","Data":"440f5c24f4401aaa6d9155f28618e4ee2fff407f472ec31226aff4153b7bc790"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.073427 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerStarted","Data":"992b04068e781992f79a473904dcf715fc2851599f0f23b6ae46c50e6a21879b"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.075405 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-f6mr5" event={"ID":"c6c5b609-3028-4ff9-9bf7-88fa13784f6a","Type":"ContainerStarted","Data":"31b7de18858821baab09999cca70834aa04404fc36fd5f265f6740bf8d02f980"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.092379 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86649f76d6-p6jhc" event={"ID":"8dc145cc-e506-4686-8e22-c881d8fc079f","Type":"ContainerStarted","Data":"3f4dc1c1d500094968b0db7c91f163a32e4727613576a0fdddf25cb639cf0127"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.092426 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86649f76d6-p6jhc" event={"ID":"8dc145cc-e506-4686-8e22-c881d8fc079f","Type":"ContainerStarted","Data":"7585a6d91ef6c72273f9a97de4e2788ca127db8df219c34b1a5d972d90b6fe14"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.093414 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.106367 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c5899b866-c7lp6" event={"ID":"275eb97a-385a-428b-8635-e31d1e4def98","Type":"ContainerStarted","Data":"6db72254398e762468b918afc110f8510cb697a8783f067a2ccaa81888ed4c7a"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.113730 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" event={"ID":"84560bb3-a93c-4016-a341-e4c3cba8651e","Type":"ContainerStarted","Data":"94b1f8e0968b501fd0dbb8377fe49d49c845e473dcdfb25116dbdd92d502de69"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.115690 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerStarted","Data":"c7c780abbb40a993c43eeda2cbae1b6bc78b84ca1bacedf5fdd12081644e2207"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.116765 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8455558bd7-bssg9" event={"ID":"bef7e9a8-ed8b-477e-a9f8-329dda25e45c","Type":"ContainerStarted","Data":"400147ebda5dfb3747043320be803b298c6e285dc4c49d9c00418632b6fb6512"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.117358 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-db-sync-f6mr5" podStartSLOduration=4.074149283 podStartE2EDuration="56.117342937s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="2025-12-09 17:17:15.664124956 +0000 UTC m=+1221.655235589" lastFinishedPulling="2025-12-09 17:18:07.70731861 +0000 UTC m=+1273.698429243" observedRunningTime="2025-12-09 17:18:09.100011051 +0000 UTC m=+1275.091121694" watchObservedRunningTime="2025-12-09 17:18:09.117342937 +0000 UTC m=+1275.108453570" Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.133408 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-86649f76d6-p6jhc" podStartSLOduration=15.133385957 podStartE2EDuration="15.133385957s" podCreationTimestamp="2025-12-09 17:17:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:09.120426114 +0000 UTC m=+1275.111536747" watchObservedRunningTime="2025-12-09 17:18:09.133385957 +0000 UTC m=+1275.124496590" Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.176022 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerStarted","Data":"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16"} Dec 09 17:18:09 crc kubenswrapper[4840]: I1209 17:18:09.570737 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5df4f579dd-7gd8n"] Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.193075 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5df4f579dd-7gd8n" event={"ID":"ccffcd1a-4659-4005-abd2-ae99de7f74d1","Type":"ContainerStarted","Data":"426b77a65eeeb93e67cb967c55893816df861167f5a05342c66414d13014cba1"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.198797 4840 generic.go:334] "Generic (PLEG): container finished" podID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerID="6e6d48074944b2d50efc11e3d3b7f9a65e513886562bbfc0fc09797dee964491" exitCode=0 Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.198875 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" event={"ID":"489b3fb9-e4f8-40d7-ab9f-590a1c482235","Type":"ContainerDied","Data":"6e6d48074944b2d50efc11e3d3b7f9a65e513886562bbfc0fc09797dee964491"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.207755 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerStarted","Data":"626b7bfa78e717405b54e7f386fa77b7330f8d705011bbd94943f37ad0462ba1"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.210368 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerStarted","Data":"858f5486a14578e49078e05c95e2cdbda49fa8716d61579f0db90c140f1ec3fb"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.212659 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-7c5899b866-c7lp6" event={"ID":"275eb97a-385a-428b-8635-e31d1e4def98","Type":"ContainerStarted","Data":"cd48bb22309e82b67f040790a9013f94ada5f365c3573b15667ccb40e2c40da6"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.217420 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qv6wf" event={"ID":"7ff5b771-f400-4f66-9d95-9f66fff18a82","Type":"ContainerStarted","Data":"dfc9acac5dd49d68d5b610e43cefd88edf85db17be73880a3e4512467e7849c4"} Dec 09 17:18:10 crc kubenswrapper[4840]: I1209 17:18:10.261602 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-qv6wf" podStartSLOduration=4.140222739 podStartE2EDuration="57.261578189s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="2025-12-09 17:17:14.584232071 +0000 UTC m=+1220.575342704" lastFinishedPulling="2025-12-09 17:18:07.705587521 +0000 UTC m=+1273.696698154" observedRunningTime="2025-12-09 17:18:10.258883793 +0000 UTC m=+1276.249994436" watchObservedRunningTime="2025-12-09 17:18:10.261578189 +0000 UTC m=+1276.252688832" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.232222 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" event={"ID":"489b3fb9-e4f8-40d7-ab9f-590a1c482235","Type":"ContainerStarted","Data":"7e4d910f3a737d8eb5d0a46a77ffce9a085d0ffcfab2cfd0fd52d78722fd7813"} Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.232745 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.234173 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerStarted","Data":"1433c408e61e693379371b2479e67fb674c78d2bbed7d61f213fe5c0949a86e6"} Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.237793 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerStarted","Data":"9df212b5223e982effd724b349e838e5fcb80494502e14e236814caa91ebc1b7"} Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.238418 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.238459 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.240383 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c5899b866-c7lp6" event={"ID":"275eb97a-385a-428b-8635-e31d1e4def98","Type":"ContainerStarted","Data":"36b602c7c5b03ae362f9220ffbf6e54329e383353f7155561a641a435687f259"} Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.241154 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.241179 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.243935 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5df4f579dd-7gd8n" 
event={"ID":"ccffcd1a-4659-4005-abd2-ae99de7f74d1","Type":"ContainerStarted","Data":"9230d2c8ae6d6c70d61ebc5bac2e87bbf5a4de6f22e0816af7ff368cbf1049a2"} Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.267833 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" podStartSLOduration=13.267810749 podStartE2EDuration="13.267810749s" podCreationTimestamp="2025-12-09 17:17:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:11.257186861 +0000 UTC m=+1277.248297514" watchObservedRunningTime="2025-12-09 17:18:11.267810749 +0000 UTC m=+1277.258921382" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.291168 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7c5899b866-c7lp6" podStartSLOduration=9.291152814 podStartE2EDuration="9.291152814s" podCreationTimestamp="2025-12-09 17:18:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:11.281484452 +0000 UTC m=+1277.272595085" watchObservedRunningTime="2025-12-09 17:18:11.291152814 +0000 UTC m=+1277.282263447" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.336212 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podStartSLOduration=13.336190637 podStartE2EDuration="13.336190637s" podCreationTimestamp="2025-12-09 17:17:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:11.307082641 +0000 UTC m=+1277.298193284" watchObservedRunningTime="2025-12-09 17:18:11.336190637 +0000 UTC m=+1277.327301270" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.337694 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.337685309 podStartE2EDuration="17.337685309s" podCreationTimestamp="2025-12-09 17:17:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:11.326190187 +0000 UTC m=+1277.317300830" watchObservedRunningTime="2025-12-09 17:18:11.337685309 +0000 UTC m=+1277.328795942" Dec 09 17:18:11 crc kubenswrapper[4840]: I1209 17:18:11.693978 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.288985 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" event={"ID":"84560bb3-a93c-4016-a341-e4c3cba8651e","Type":"ContainerStarted","Data":"9138cdd18c798cf652d83fde5c5cb4581beff1556afc3afa2c346e9380f3ded8"} Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.289496 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" event={"ID":"84560bb3-a93c-4016-a341-e4c3cba8651e","Type":"ContainerStarted","Data":"3cef4987714e01ecd35496fc4e59c44c87cc0ae27139bb2ce6307564322de9ef"} Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.292134 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8455558bd7-bssg9" 
event={"ID":"bef7e9a8-ed8b-477e-a9f8-329dda25e45c","Type":"ContainerStarted","Data":"a649483e61442211ee49a9cc23a5a3c22ce1578386a0da0b41e78bc34219d4e8"} Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.292163 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-8455558bd7-bssg9" event={"ID":"bef7e9a8-ed8b-477e-a9f8-329dda25e45c","Type":"ContainerStarted","Data":"e69a17e672fe4250d32e91a74801060b242a7430726a9787ccf6af7293c8b289"} Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.296721 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5df4f579dd-7gd8n" event={"ID":"ccffcd1a-4659-4005-abd2-ae99de7f74d1","Type":"ContainerStarted","Data":"9843bc1a515060c3b7596a8e15827cf7f0cc576972cdb709f11b965cfb740ef1"} Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.296948 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.297008 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.320314 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7d74754bb4-694mt" podStartSLOduration=11.711853937 podStartE2EDuration="15.32026974s" podCreationTimestamp="2025-12-09 17:17:58 +0000 UTC" firstStartedPulling="2025-12-09 17:18:08.589692204 +0000 UTC m=+1274.580802837" lastFinishedPulling="2025-12-09 17:18:12.198108007 +0000 UTC m=+1278.189218640" observedRunningTime="2025-12-09 17:18:13.307847871 +0000 UTC m=+1279.298958504" watchObservedRunningTime="2025-12-09 17:18:13.32026974 +0000 UTC m=+1279.311380373" Dec 09 17:18:13 crc kubenswrapper[4840]: I1209 17:18:13.332052 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-8455558bd7-bssg9" podStartSLOduration=11.742940648 podStartE2EDuration="15.33203256s" podCreationTimestamp="2025-12-09 17:17:58 +0000 UTC" firstStartedPulling="2025-12-09 17:18:08.611844335 +0000 UTC m=+1274.602954968" lastFinishedPulling="2025-12-09 17:18:12.200936247 +0000 UTC m=+1278.192046880" observedRunningTime="2025-12-09 17:18:13.329096087 +0000 UTC m=+1279.320206720" watchObservedRunningTime="2025-12-09 17:18:13.33203256 +0000 UTC m=+1279.323143183" Dec 09 17:18:14 crc kubenswrapper[4840]: E1209 17:18:14.557003 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice\": RecentStats: unable to find data in memory cache]" Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.317282 4840 generic.go:334] "Generic (PLEG): container finished" podID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" containerID="31b7de18858821baab09999cca70834aa04404fc36fd5f265f6740bf8d02f980" exitCode=0 Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.317341 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-f6mr5" event={"ID":"c6c5b609-3028-4ff9-9bf7-88fa13784f6a","Type":"ContainerDied","Data":"31b7de18858821baab09999cca70834aa04404fc36fd5f265f6740bf8d02f980"} Dec 09 
17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.321697 4840 generic.go:334] "Generic (PLEG): container finished" podID="7ff5b771-f400-4f66-9d95-9f66fff18a82" containerID="dfc9acac5dd49d68d5b610e43cefd88edf85db17be73880a3e4512467e7849c4" exitCode=0 Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.321754 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qv6wf" event={"ID":"7ff5b771-f400-4f66-9d95-9f66fff18a82","Type":"ContainerDied","Data":"dfc9acac5dd49d68d5b610e43cefd88edf85db17be73880a3e4512467e7849c4"} Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.344787 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5df4f579dd-7gd8n" podStartSLOduration=7.344768077 podStartE2EDuration="7.344768077s" podCreationTimestamp="2025-12-09 17:18:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:13.369980204 +0000 UTC m=+1279.361090837" watchObservedRunningTime="2025-12-09 17:18:15.344768077 +0000 UTC m=+1281.335878710" Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.563357 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.565061 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.617310 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 17:18:15 crc kubenswrapper[4840]: I1209 17:18:15.618483 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 17:18:16 crc kubenswrapper[4840]: I1209 17:18:16.107426 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:16 crc kubenswrapper[4840]: I1209 17:18:16.333853 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 17:18:16 crc kubenswrapper[4840]: I1209 17:18:16.333903 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.201241 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.212502 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.267526 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5sfs\" (UniqueName: \"kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.267860 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.267938 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs\") pod \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268000 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268033 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data\") pod \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268055 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268070 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr76w\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w\") pod \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268115 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: \"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268151 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts\") pod \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268168 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id\") pod \"7ff5b771-f400-4f66-9d95-9f66fff18a82\" (UID: 
\"7ff5b771-f400-4f66-9d95-9f66fff18a82\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.268203 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle\") pod \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\" (UID: \"c6c5b609-3028-4ff9-9bf7-88fa13784f6a\") " Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.270146 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.275357 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs" (OuterVolumeSpecName: "kube-api-access-t5sfs") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "kube-api-access-t5sfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.283046 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w" (OuterVolumeSpecName: "kube-api-access-pr76w") pod "c6c5b609-3028-4ff9-9bf7-88fa13784f6a" (UID: "c6c5b609-3028-4ff9-9bf7-88fa13784f6a"). InnerVolumeSpecName "kube-api-access-pr76w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.285431 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.285539 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs" (OuterVolumeSpecName: "certs") pod "c6c5b609-3028-4ff9-9bf7-88fa13784f6a" (UID: "c6c5b609-3028-4ff9-9bf7-88fa13784f6a"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.286917 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts" (OuterVolumeSpecName: "scripts") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.288078 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts" (OuterVolumeSpecName: "scripts") pod "c6c5b609-3028-4ff9-9bf7-88fa13784f6a" (UID: "c6c5b609-3028-4ff9-9bf7-88fa13784f6a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.312687 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.337870 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6c5b609-3028-4ff9-9bf7-88fa13784f6a" (UID: "c6c5b609-3028-4ff9-9bf7-88fa13784f6a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377880 4840 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377913 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377925 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr76w\" (UniqueName: \"kubernetes.io/projected/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-kube-api-access-pr76w\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377936 4840 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377947 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377956 4840 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ff5b771-f400-4f66-9d95-9f66fff18a82-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377987 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.377998 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5sfs\" (UniqueName: \"kubernetes.io/projected/7ff5b771-f400-4f66-9d95-9f66fff18a82-kube-api-access-t5sfs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.378008 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.410058 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data" (OuterVolumeSpecName: "config-data") pod "c6c5b609-3028-4ff9-9bf7-88fa13784f6a" (UID: "c6c5b609-3028-4ff9-9bf7-88fa13784f6a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.411189 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qv6wf" event={"ID":"7ff5b771-f400-4f66-9d95-9f66fff18a82","Type":"ContainerDied","Data":"7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56"} Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.411228 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d6359b4eed65e67958458ec6fe303f82c86b968223fbdabd44b6785339b8f56" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.411286 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qv6wf" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.421078 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-f6mr5" event={"ID":"c6c5b609-3028-4ff9-9bf7-88fa13784f6a","Type":"ContainerDied","Data":"f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc"} Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.421333 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1ef775402ea577b416476f2793893e9cf46ab45089ab26f9c1d95459dfb5afc" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.421122 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-f6mr5" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.425109 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data" (OuterVolumeSpecName: "config-data") pod "7ff5b771-f400-4f66-9d95-9f66fff18a82" (UID: "7ff5b771-f400-4f66-9d95-9f66fff18a82"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.479544 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6c5b609-3028-4ff9-9bf7-88fa13784f6a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.479570 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff5b771-f400-4f66-9d95-9f66fff18a82-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.574335 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-storageinit-ddrkz"] Dec 09 17:18:17 crc kubenswrapper[4840]: E1209 17:18:17.574805 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82" containerName="cinder-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.574822 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82" containerName="cinder-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: E1209 17:18:17.574840 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" containerName="cloudkitty-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.574846 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" containerName="cloudkitty-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.575070 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" containerName="cloudkitty-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.575087 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82" containerName="cinder-db-sync" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.575814 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.589463 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.589508 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.589559 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.589467 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.589789 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-4pj7n" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.609055 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-storageinit-ddrkz"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.636443 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.638615 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.643336 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.653029 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685112 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dlbz\" (UniqueName: \"kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685161 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685212 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685234 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685249 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685264 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685328 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685358 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc 
kubenswrapper[4840]: I1209 17:18:17.685432 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685475 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxql6\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.685526 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-certs\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.764097 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.764549 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="dnsmasq-dns" containerID="cri-o://7e4d910f3a737d8eb5d0a46a77ffce9a085d0ffcfab2cfd0fd52d78722fd7813" gracePeriod=10 Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.769992 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.806601 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807535 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807610 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807641 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807674 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " 
pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807813 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807864 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.807948 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.808048 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxql6\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.808143 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-certs\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.808225 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dlbz\" (UniqueName: \"kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.808258 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.808452 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.813947 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.823137 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-768d86bb9c-skrvq" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.827152 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.827710 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.828655 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.830383 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.839153 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.839205 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dlbz\" (UniqueName: \"kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz\") pod \"cinder-scheduler-0\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.841838 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-certs\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.841900 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.847520 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.848245 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.849257 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxql6\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6\") pod \"cloudkitty-storageinit-ddrkz\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") " pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.908551 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910460 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910529 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910609 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910668 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910771 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m57q5\" (UniqueName: \"kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.910801 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " 
pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.997319 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.998422 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"] Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.999251 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-57dff6db4d-sszz8" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-api" containerID="cri-o://ff658c597a8102e9b2fe318077182e1f58c711b231db3f360ee95859b37969a8" gracePeriod=30 Dec 09 17:18:17 crc kubenswrapper[4840]: I1209 17:18:17.999871 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-57dff6db4d-sszz8" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-httpd" containerID="cri-o://128af6f61276d2d10b57801721898ca87045fb44e4521f15733a7803df20a9e3" gracePeriod=30 Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.012766 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.012820 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.012870 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.012914 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.013011 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m57q5\" (UniqueName: \"kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.013030 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.013932 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.014526 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.025354 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.029157 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.033677 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.036958 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.039834 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.045317 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.054597 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m57q5\" (UniqueName: \"kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5\") pod \"dnsmasq-dns-6bb4fc677f-579hg\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.073938 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.115607 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.115942 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4w6t\" (UniqueName: \"kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.131475 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.131710 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.131822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.131888 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.132102 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.159377 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237034 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237106 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4w6t\" (UniqueName: \"kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237162 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237216 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237236 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237254 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237304 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.237416 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.250246 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.258396 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " 
pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.259823 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.265909 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.266925 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.286590 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4w6t\" (UniqueName: \"kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t\") pod \"cinder-api-0\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " pod="openstack/cinder-api-0" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.423655 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.437548 4840 generic.go:334] "Generic (PLEG): container finished" podID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerID="7e4d910f3a737d8eb5d0a46a77ffce9a085d0ffcfab2cfd0fd52d78722fd7813" exitCode=0 Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.437657 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.437668 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.438145 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" event={"ID":"489b3fb9-e4f8-40d7-ab9f-590a1c482235","Type":"ContainerDied","Data":"7e4d910f3a737d8eb5d0a46a77ffce9a085d0ffcfab2cfd0fd52d78722fd7813"} Dec 09 17:18:18 crc kubenswrapper[4840]: I1209 17:18:18.515471 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.315190 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.174:5353: connect: connection refused" Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.450417 4840 generic.go:334] "Generic (PLEG): container finished" podID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerID="128af6f61276d2d10b57801721898ca87045fb44e4521f15733a7803df20a9e3" exitCode=0 Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.450440 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerDied","Data":"128af6f61276d2d10b57801721898ca87045fb44e4521f15733a7803df20a9e3"} Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.521341 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.521443 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:18:19 crc kubenswrapper[4840]: I1209 17:18:19.830447 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.161825 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.189903 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nknjq\" (UniqueName: \"kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.189944 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.189989 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.190134 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.190169 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.190239 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc\") pod \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\" (UID: \"489b3fb9-e4f8-40d7-ab9f-590a1c482235\") " Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.229285 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq" (OuterVolumeSpecName: "kube-api-access-nknjq") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "kube-api-access-nknjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.260256 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config" (OuterVolumeSpecName: "config") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.293483 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nknjq\" (UniqueName: \"kubernetes.io/projected/489b3fb9-e4f8-40d7-ab9f-590a1c482235-kube-api-access-nknjq\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.293520 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.336692 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.350691 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.384117 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"] Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.388381 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.395712 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.395732 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.395743 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.444774 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "489b3fb9-e4f8-40d7-ab9f-590a1c482235" (UID: "489b3fb9-e4f8-40d7-ab9f-590a1c482235"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.487634 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" event={"ID":"489b3fb9-e4f8-40d7-ab9f-590a1c482235","Type":"ContainerDied","Data":"440f5c24f4401aaa6d9155f28618e4ee2fff407f472ec31226aff4153b7bc790"} Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.487683 4840 scope.go:117] "RemoveContainer" containerID="7e4d910f3a737d8eb5d0a46a77ffce9a085d0ffcfab2cfd0fd52d78722fd7813" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.487816 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7c7v9" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.495351 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.499471 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489b3fb9-e4f8-40d7-ab9f-590a1c482235-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.503632 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" event={"ID":"40aff238-497c-4504-b6e0-ca86d21d7888","Type":"ContainerStarted","Data":"d6c3c200d57aa42ff523a8e219efe1db6c5a334e3fec36c736801b81a907d0ea"} Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.656536 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-storageinit-ddrkz"] Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.689366 4840 scope.go:117] "RemoveContainer" containerID="6e6d48074944b2d50efc11e3d3b7f9a65e513886562bbfc0fc09797dee964491" Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.706080 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.726848 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7c7v9"] Dec 09 17:18:20 crc kubenswrapper[4840]: W1209 17:18:20.776308 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod827d00c4_cdf4_43ad_bb9c_746c36adb391.slice/crio-1734c2e85b028b51b515f23f597097ce8cd8515077005dcca81c992c0e6812aa WatchSource:0}: Error finding container 1734c2e85b028b51b515f23f597097ce8cd8515077005dcca81c992c0e6812aa: Status 404 returned error can't find the container with id 1734c2e85b028b51b515f23f597097ce8cd8515077005dcca81c992c0e6812aa Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.783469 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:20 crc kubenswrapper[4840]: I1209 17:18:20.823840 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.513051 4840 generic.go:334] "Generic (PLEG): container finished" podID="40aff238-497c-4504-b6e0-ca86d21d7888" containerID="ee8169733abf3bb105b991e818a5a76416463925e051f0cdf4a89cbbd9313345" exitCode=0 Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.513246 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" event={"ID":"40aff238-497c-4504-b6e0-ca86d21d7888","Type":"ContainerDied","Data":"ee8169733abf3bb105b991e818a5a76416463925e051f0cdf4a89cbbd9313345"} Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.525296 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-ddrkz" event={"ID":"42aee3b4-245f-4a6c-8765-ea5b407d0c2e","Type":"ContainerStarted","Data":"f39c557d83b361b7cba8e942511dda27a1a7e5a9942ad897a51fadfcfab22f62"} Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.525342 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-ddrkz" event={"ID":"42aee3b4-245f-4a6c-8765-ea5b407d0c2e","Type":"ContainerStarted","Data":"e5d824bd5975b675be1a48be518a6510e07d9d6adc199c00b7bda4a6e04bd0bd"} Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.542658 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerStarted","Data":"1734c2e85b028b51b515f23f597097ce8cd8515077005dcca81c992c0e6812aa"}
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.563638 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-storageinit-ddrkz" podStartSLOduration=4.563623506 podStartE2EDuration="4.563623506s" podCreationTimestamp="2025-12-09 17:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:21.562834264 +0000 UTC m=+1287.553944897" watchObservedRunningTime="2025-12-09 17:18:21.563623506 +0000 UTC m=+1287.554734139"
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.576824 4840 generic.go:334] "Generic (PLEG): container finished" podID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerID="ff658c597a8102e9b2fe318077182e1f58c711b231db3f360ee95859b37969a8" exitCode=0
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.576890 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerDied","Data":"ff658c597a8102e9b2fe318077182e1f58c711b231db3f360ee95859b37969a8"}
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593265 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerStarted","Data":"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71"}
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593525 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-central-agent" containerID="cri-o://ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a" gracePeriod=30
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593592 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="sg-core" containerID="cri-o://b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16" gracePeriod=30
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593626 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-notification-agent" containerID="cri-o://5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7" gracePeriod=30
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593595 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="proxy-httpd" containerID="cri-o://6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71" gracePeriod=30
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.593878 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
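Four ceilometer-0 containers are killed above with gracePeriod=30: the runtime first delivers SIGTERM and only escalates to SIGKILL if the container outlives the grace period. A self-contained sketch of that termination pattern, with a plain process via os/exec standing in for a container (assumes a Unix-like system; this is an illustration of the pattern, not kubelet's code path):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace mirrors the kubelet pattern above: ask nicely with SIGTERM,
// then escalate to SIGKILL once the grace period expires.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	_ = cmd.Process.Signal(syscall.SIGTERM)
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace period expired: SIGKILL
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "300") // stand-in for a container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println("killing with grace period:", stopWithGrace(cmd, 3*time.Second))
}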
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.634935 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.590903039 podStartE2EDuration="1m8.634890856s" podCreationTimestamp="2025-12-09 17:17:13 +0000 UTC" firstStartedPulling="2025-12-09 17:17:14.721599295 +0000 UTC m=+1220.712709918" lastFinishedPulling="2025-12-09 17:18:19.765587102 +0000 UTC m=+1285.756697735" observedRunningTime="2025-12-09 17:18:21.626197662 +0000 UTC m=+1287.617308295" watchObservedRunningTime="2025-12-09 17:18:21.634890856 +0000 UTC m=+1287.626001479"
Dec 09 17:18:21 crc kubenswrapper[4840]: I1209 17:18:21.991364 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57dff6db4d-sszz8"
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.025368 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c5899b866-c7lp6"
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.074507 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle\") pod \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") "
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.074549 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config\") pod \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") "
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.074623 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6mnm\" (UniqueName: \"kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm\") pod \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") "
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.074707 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config\") pod \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") "
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.075123 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs\") pod \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\" (UID: \"a4e32138-7ab5-4f68-bad5-554ba844c8a0\") "
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.091690 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm" (OuterVolumeSpecName: "kube-api-access-j6mnm") pod "a4e32138-7ab5-4f68-bad5-554ba844c8a0" (UID: "a4e32138-7ab5-4f68-bad5-554ba844c8a0"). InnerVolumeSpecName "kube-api-access-j6mnm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.108324 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a4e32138-7ab5-4f68-bad5-554ba844c8a0" (UID: "a4e32138-7ab5-4f68-bad5-554ba844c8a0"). 
InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.178085 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6mnm\" (UniqueName: \"kubernetes.io/projected/a4e32138-7ab5-4f68-bad5-554ba844c8a0-kube-api-access-j6mnm\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.178312 4840 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.201205 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config" (OuterVolumeSpecName: "config") pod "a4e32138-7ab5-4f68-bad5-554ba844c8a0" (UID: "a4e32138-7ab5-4f68-bad5-554ba844c8a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.210399 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4e32138-7ab5-4f68-bad5-554ba844c8a0" (UID: "a4e32138-7ab5-4f68-bad5-554ba844c8a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.239131 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a4e32138-7ab5-4f68-bad5-554ba844c8a0" (UID: "a4e32138-7ab5-4f68-bad5-554ba844c8a0"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.278853 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c5899b866-c7lp6" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.282238 4840 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.282265 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.282275 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a4e32138-7ab5-4f68-bad5-554ba844c8a0-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.375557 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.375821 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" containerID="cri-o://9df212b5223e982effd724b349e838e5fcb80494502e14e236814caa91ebc1b7" gracePeriod=30 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.379911 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" containerID="cri-o://858f5486a14578e49078e05c95e2cdbda49fa8716d61579f0db90c140f1ec3fb" gracePeriod=30 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.385975 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": EOF" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.386231 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": EOF" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.621044 4840 generic.go:334] "Generic (PLEG): container finished" podID="ada58ef9-8583-4795-8540-093071fb0980" containerID="858f5486a14578e49078e05c95e2cdbda49fa8716d61579f0db90c140f1ec3fb" exitCode=143 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.623371 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" path="/var/lib/kubelet/pods/489b3fb9-e4f8-40d7-ab9f-590a1c482235/volumes" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.624410 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerDied","Data":"858f5486a14578e49078e05c95e2cdbda49fa8716d61579f0db90c140f1ec3fb"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.626042 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"c2d58920-cf1a-4862-83c1-36819aec0ef8","Type":"ContainerStarted","Data":"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.642211 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57dff6db4d-sszz8" event={"ID":"a4e32138-7ab5-4f68-bad5-554ba844c8a0","Type":"ContainerDied","Data":"3061786023d6c9c30e936f58a302b442939a7b7617ccd3a485bf405b1d382cfe"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.642465 4840 scope.go:117] "RemoveContainer" containerID="128af6f61276d2d10b57801721898ca87045fb44e4521f15733a7803df20a9e3" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.642590 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57dff6db4d-sszz8" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674390 4840 generic.go:334] "Generic (PLEG): container finished" podID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerID="6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71" exitCode=0 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674437 4840 generic.go:334] "Generic (PLEG): container finished" podID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerID="b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16" exitCode=2 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674446 4840 generic.go:334] "Generic (PLEG): container finished" podID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerID="ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a" exitCode=0 Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674487 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerDied","Data":"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674531 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerDied","Data":"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.674541 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerDied","Data":"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.682743 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"] Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.691279 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" event={"ID":"40aff238-497c-4504-b6e0-ca86d21d7888","Type":"ContainerStarted","Data":"ef652bdb03df7ff14f1be4d8566f7e12fd5b73eb8e3135e1f8765ee8137a69fc"} Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.700511 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-57dff6db4d-sszz8"] Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.718972 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" podStartSLOduration=5.718942208 podStartE2EDuration="5.718942208s" podCreationTimestamp="2025-12-09 17:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 
17:18:22.713987079 +0000 UTC m=+1288.705097712" watchObservedRunningTime="2025-12-09 17:18:22.718942208 +0000 UTC m=+1288.710052841" Dec 09 17:18:22 crc kubenswrapper[4840]: I1209 17:18:22.757050 4840 scope.go:117] "RemoveContainer" containerID="ff658c597a8102e9b2fe318077182e1f58c711b231db3f360ee95859b37969a8" Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.159459 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.713226 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c2d58920-cf1a-4862-83c1-36819aec0ef8","Type":"ContainerStarted","Data":"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367"} Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.713559 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.713564 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api-log" containerID="cri-o://5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" gracePeriod=30 Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.713650 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api" containerID="cri-o://9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" gracePeriod=30 Dec 09 17:18:23 crc kubenswrapper[4840]: I1209 17:18:23.737466 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.737445382 podStartE2EDuration="6.737445382s" podCreationTimestamp="2025-12-09 17:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:23.734027116 +0000 UTC m=+1289.725137749" watchObservedRunningTime="2025-12-09 17:18:23.737445382 +0000 UTC m=+1289.728556015" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.314566 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.337310 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.337474 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.337611 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.337721 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.337918 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.338013 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.338180 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4w6t\" (UniqueName: \"kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t\") pod \"c2d58920-cf1a-4862-83c1-36819aec0ef8\" (UID: \"c2d58920-cf1a-4862-83c1-36819aec0ef8\") " Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.341899 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.343525 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts" (OuterVolumeSpecName: "scripts") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.343994 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs" (OuterVolumeSpecName: "logs") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.346787 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t" (OuterVolumeSpecName: "kube-api-access-b4w6t") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "kube-api-access-b4w6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.347896 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.390890 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.406207 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data" (OuterVolumeSpecName: "config-data") pod "c2d58920-cf1a-4862-83c1-36819aec0ef8" (UID: "c2d58920-cf1a-4862-83c1-36819aec0ef8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440346 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440382 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d58920-cf1a-4862-83c1-36819aec0ef8-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440396 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440407 4840 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2d58920-cf1a-4862-83c1-36819aec0ef8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440418 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4w6t\" (UniqueName: \"kubernetes.io/projected/c2d58920-cf1a-4862-83c1-36819aec0ef8-kube-api-access-b4w6t\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440433 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.440444 4840 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2d58920-cf1a-4862-83c1-36819aec0ef8-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.634070 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" path="/var/lib/kubelet/pods/a4e32138-7ab5-4f68-bad5-554ba844c8a0/volumes" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.738403 4840 generic.go:334] "Generic (PLEG): container finished" podID="42aee3b4-245f-4a6c-8765-ea5b407d0c2e" containerID="f39c557d83b361b7cba8e942511dda27a1a7e5a9942ad897a51fadfcfab22f62" exitCode=0 Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.738487 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-ddrkz" event={"ID":"42aee3b4-245f-4a6c-8765-ea5b407d0c2e","Type":"ContainerDied","Data":"f39c557d83b361b7cba8e942511dda27a1a7e5a9942ad897a51fadfcfab22f62"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742158 4840 generic.go:334] "Generic (PLEG): container finished" podID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerID="9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" exitCode=0 Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742199 4840 generic.go:334] "Generic (PLEG): container finished" podID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerID="5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" exitCode=143 Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742213 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742222 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c2d58920-cf1a-4862-83c1-36819aec0ef8","Type":"ContainerDied","Data":"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742278 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c2d58920-cf1a-4862-83c1-36819aec0ef8","Type":"ContainerDied","Data":"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742300 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c2d58920-cf1a-4862-83c1-36819aec0ef8","Type":"ContainerDied","Data":"4cd9c206c92124f55c6811a1cf9e380fd7481ba67b24d79a7c2d421a71755ced"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.742327 4840 scope.go:117] "RemoveContainer" containerID="9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.751219 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerStarted","Data":"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.751269 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerStarted","Data":"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320"} Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.777099 4840 scope.go:117] "RemoveContainer" containerID="5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.792466 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.808937 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.814674 4840 scope.go:117] "RemoveContainer" containerID="9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.815074 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367\": container with ID starting with 9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367 not found: ID does not exist" containerID="9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.815111 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367"} err="failed to get container status \"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367\": rpc error: code = NotFound desc = could not find container \"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367\": container with ID starting with 9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367 not found: ID does not exist" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.815140 4840 scope.go:117] "RemoveContainer" 
containerID="5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.815722 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af\": container with ID starting with 5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af not found: ID does not exist" containerID="5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.816696 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af"} err="failed to get container status \"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af\": rpc error: code = NotFound desc = could not find container \"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af\": container with ID starting with 5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af not found: ID does not exist" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.816739 4840 scope.go:117] "RemoveContainer" containerID="9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.824910 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367"} err="failed to get container status \"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367\": rpc error: code = NotFound desc = could not find container \"9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367\": container with ID starting with 9f1754efb9e8ac7ee522b7378d0392fe9f29dfe1c8011a452ec6ec54850a2367 not found: ID does not exist" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.824974 4840 scope.go:117] "RemoveContainer" containerID="5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.828999 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af"} err="failed to get container status \"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af\": rpc error: code = NotFound desc = could not find container \"5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af\": container with ID starting with 5016246bd3c527ed3eb9b77ece0327320c5a80919773e2307d9f6161ea13a6af not found: ID does not exist" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.841340 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.396881853 podStartE2EDuration="7.841315681s" podCreationTimestamp="2025-12-09 17:18:17 +0000 UTC" firstStartedPulling="2025-12-09 17:18:20.866147778 +0000 UTC m=+1286.857258411" lastFinishedPulling="2025-12-09 17:18:23.310581596 +0000 UTC m=+1289.301692239" observedRunningTime="2025-12-09 17:18:24.784513598 +0000 UTC m=+1290.775624231" watchObservedRunningTime="2025-12-09 17:18:24.841315681 +0000 UTC m=+1290.832426334" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.857863 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858354 4840 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-httpd" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858379 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-httpd" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858397 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-api" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858405 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-api" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858419 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api-log" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858429 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api-log" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858442 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858451 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858532 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="dnsmasq-dns" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858544 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="dnsmasq-dns" Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.858564 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="init" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858571 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="init" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858807 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858836 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-httpd" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858853 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" containerName="cinder-api-log" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858866 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e32138-7ab5-4f68-bad5-554ba844c8a0" containerName="neutron-api" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.858881 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="489b3fb9-e4f8-40d7-ab9f-590a1c482235" containerName="dnsmasq-dns" Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.860451 4840 util.go:30] "No sandbox for pod can be found. 
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.860451 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.864588 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.864741 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.864839 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.870432 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 09 17:18:24 crc kubenswrapper[4840]: E1209 17:18:24.873597 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2d58920_cf1a_4862_83c1_36819aec0ef8.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2d58920_cf1a_4862_83c1_36819aec0ef8.slice/crio-4cd9c206c92124f55c6811a1cf9e380fd7481ba67b24d79a7c2d421a71755ced\": RecentStats: unable to find data in memory cache]"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949157 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-scripts\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949298 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949327 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8cft\" (UniqueName: \"kubernetes.io/projected/b22b06eb-f287-43cf-abc6-9cb5580fa71a-kube-api-access-j8cft\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949429 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22b06eb-f287-43cf-abc6-9cb5580fa71a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949458 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949476 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22b06eb-f287-43cf-abc6-9cb5580fa71a-logs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949548 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949617 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:24 crc kubenswrapper[4840]: I1209 17:18:24.949653 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.051980 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22b06eb-f287-43cf-abc6-9cb5580fa71a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052040 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052058 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22b06eb-f287-43cf-abc6-9cb5580fa71a-logs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052080 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052094 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052109 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052141 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-scripts\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052229 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052247 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8cft\" (UniqueName: \"kubernetes.io/projected/b22b06eb-f287-43cf-abc6-9cb5580fa71a-kube-api-access-j8cft\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.052786 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22b06eb-f287-43cf-abc6-9cb5580fa71a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.053051 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22b06eb-f287-43cf-abc6-9cb5580fa71a-logs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.058574 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.058782 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.063671 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.064731 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.064848 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.064909 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22b06eb-f287-43cf-abc6-9cb5580fa71a-scripts\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.070861 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8cft\" (UniqueName: \"kubernetes.io/projected/b22b06eb-f287-43cf-abc6-9cb5580fa71a-kube-api-access-j8cft\") pod \"cinder-api-0\" (UID: \"b22b06eb-f287-43cf-abc6-9cb5580fa71a\") " pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.222531 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 09 17:18:25 crc kubenswrapper[4840]: I1209 17:18:25.758183 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.333757 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-ddrkz"
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.378628 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts\") pod \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") "
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.378724 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-certs\") pod \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") "
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.378793 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxql6\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6\") pod \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") "
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.378851 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle\") pod \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") "
Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.378896 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data\") pod \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\" (UID: \"42aee3b4-245f-4a6c-8765-ea5b407d0c2e\") "
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.404358 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6" (OuterVolumeSpecName: "kube-api-access-dxql6") pod "42aee3b4-245f-4a6c-8765-ea5b407d0c2e" (UID: "42aee3b4-245f-4a6c-8765-ea5b407d0c2e"). InnerVolumeSpecName "kube-api-access-dxql6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.408044 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts" (OuterVolumeSpecName: "scripts") pod "42aee3b4-245f-4a6c-8765-ea5b407d0c2e" (UID: "42aee3b4-245f-4a6c-8765-ea5b407d0c2e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.408230 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data" (OuterVolumeSpecName: "config-data") pod "42aee3b4-245f-4a6c-8765-ea5b407d0c2e" (UID: "42aee3b4-245f-4a6c-8765-ea5b407d0c2e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.443197 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42aee3b4-245f-4a6c-8765-ea5b407d0c2e" (UID: "42aee3b4-245f-4a6c-8765-ea5b407d0c2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.485269 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.485308 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.485320 4840 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.485331 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxql6\" (UniqueName: \"kubernetes.io/projected/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-kube-api-access-dxql6\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.485345 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42aee3b4-245f-4a6c-8765-ea5b407d0c2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.590779 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.631049 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2d58920-cf1a-4862-83c1-36819aec0ef8" path="/var/lib/kubelet/pods/c2d58920-cf1a-4862-83c1-36819aec0ef8/volumes" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689297 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689375 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9srgf\" (UniqueName: \"kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689457 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689482 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689537 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689628 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.689681 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd\") pod \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\" (UID: \"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f\") " Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.690421 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.695517 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.713480 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts" (OuterVolumeSpecName: "scripts") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.715551 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf" (OuterVolumeSpecName: "kube-api-access-9srgf") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "kube-api-access-9srgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.729984 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.791449 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.791487 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.791496 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.791506 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.791514 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9srgf\" (UniqueName: \"kubernetes.io/projected/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-kube-api-access-9srgf\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.797054 4840 generic.go:334] "Generic (PLEG): container finished" podID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerID="5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7" exitCode=0 Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.797111 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerDied","Data":"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7"} Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.797137 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7167f9ca-e4c8-413e-984b-d6b6ddca0e2f","Type":"ContainerDied","Data":"cb1a3c61d39dea862f5e1df48256b9e4689a85e4415957ac0c37b6131eb10337"} Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.797154 4840 scope.go:117] "RemoveContainer" containerID="6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.797265 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.803581 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22b06eb-f287-43cf-abc6-9cb5580fa71a","Type":"ContainerStarted","Data":"91126b735b34471eb8bb42f802454392afc385f4a9c2eb7b176e41091060aa6c"} Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.803627 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22b06eb-f287-43cf-abc6-9cb5580fa71a","Type":"ContainerStarted","Data":"697a5f2a1122681539cd3b6c2d3a9d1bfa690a7d69f588c28a5f7ea6f989bce8"} Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.804794 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-ddrkz" event={"ID":"42aee3b4-245f-4a6c-8765-ea5b407d0c2e","Type":"ContainerDied","Data":"e5d824bd5975b675be1a48be518a6510e07d9d6adc199c00b7bda4a6e04bd0bd"} Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.804830 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5d824bd5975b675be1a48be518a6510e07d9d6adc199c00b7bda4a6e04bd0bd" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.804884 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-ddrkz" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.822046 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.828834 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": read tcp 10.217.0.2:38914->10.217.0.175:9311: read: connection reset by peer" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.829093 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7bfb47b7b8-d27mm" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": read tcp 10.217.0.2:38904->10.217.0.175:9311: read: connection reset by peer" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.842894 4840 scope.go:117] "RemoveContainer" containerID="b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.893240 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.894085 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data" (OuterVolumeSpecName: "config-data") pod "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" (UID: "7167f9ca-e4c8-413e-984b-d6b6ddca0e2f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:26 crc kubenswrapper[4840]: I1209 17:18:26.997233 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.069723 4840 scope.go:117] "RemoveContainer" containerID="5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.201577 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.208639 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="proxy-httpd" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.208671 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="proxy-httpd" Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.208697 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="sg-core" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.208703 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="sg-core" Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.208714 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-central-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.208720 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-central-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 
17:18:27.208730 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-notification-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.208736 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-notification-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.208755 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42aee3b4-245f-4a6c-8765-ea5b407d0c2e" containerName="cloudkitty-storageinit" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.208761 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="42aee3b4-245f-4a6c-8765-ea5b407d0c2e" containerName="cloudkitty-storageinit" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209021 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-central-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209044 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="sg-core" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209054 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="proxy-httpd" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209064 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="42aee3b4-245f-4a6c-8765-ea5b407d0c2e" containerName="cloudkitty-storageinit" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209077 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" containerName="ceilometer-notification-agent" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209774 4840 util.go:30] "No sandbox for pod can be found. 
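The barbican-api readiness failures a few entries above are plain HTTP GET probes against /healthcheck on port 9311; kubelet records the raw dial error ("connection reset by peer") as the probe output and counts it as a failure. A sketch of an equivalent probe spec in client-go types; the period and threshold are assumptions, not values taken from this log:

    package main

    import (
    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // barbicanReadiness mirrors the probe shape behind the "Probe failed"
    // entries above: an HTTP GET that must return 2xx/3xx to count as ready.
    func barbicanReadiness() *corev1.Probe {
    	return &corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Path: "/healthcheck",
    				Port: intstr.FromInt(9311),
    			},
    		},
    		PeriodSeconds:    10, // assumed
    		FailureThreshold: 3,  // assumed
    	}
    }

    func main() { _ = barbicanReadiness() }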
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.209774 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.226464 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.226703 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-proc-config-data"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.226857 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.227138 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.230102 4840 scope.go:117] "RemoveContainer" containerID="ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.236530 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-4pj7n"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.250012 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.274544 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.286404 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.307035 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.312725 4840 scope.go:117] "RemoveContainer" containerID="6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71"
Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.314506 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71\": container with ID starting with 6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71 not found: ID does not exist" containerID="6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.314548 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71"} err="failed to get container status \"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71\": rpc error: code = NotFound desc = could not find container \"6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71\": container with ID starting with 6dbd6e7907469c363c83d305c3f9f39f4e08de8a945c7a7af9ab9227206aae71 not found: ID does not exist"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.314574 4840 scope.go:117] "RemoveContainer" containerID="b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.315626 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322311 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lvxd\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322381 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322461 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322572 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322632 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322793 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.322991 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.323515 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.345546 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.345842 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="dnsmasq-dns" containerID="cri-o://ef652bdb03df7ff14f1be4d8566f7e12fd5b73eb8e3135e1f8765ee8137a69fc" gracePeriod=10
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.351339 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg"
Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.353084 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16\": container with ID starting with b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16 not found: ID does not exist" containerID="b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.353135 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16"} err="failed to get container status \"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16\": rpc error: code = NotFound desc = could not find container \"b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16\": container with ID starting with b2fd9e710de6b8d7bcf4f536105765ba4a3da29a14fb9597ccabd354b8f49e16 not found: ID does not exist"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.353166 4840 scope.go:117] "RemoveContainer" containerID="5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7"
Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.359434 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7\": container with ID starting with 5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7 not found: ID does not exist" containerID="5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.359481 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7"} err="failed to get container status \"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7\": rpc error: code = NotFound desc = could not find container \"5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7\": container with ID starting with 5729b289942cc95aa7569dd512b7bea4ba277f1502fa1ebb18e3895ab652ebc7 not found: ID does not exist"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.359507 4840 scope.go:117] "RemoveContainer" containerID="ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"
Dec 09 17:18:27 crc kubenswrapper[4840]: E1209 17:18:27.361459 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a\": container with ID starting with ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a not found: ID does not exist" containerID="ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.361501 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a"} err="failed to get container status \"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a\": rpc error: code = NotFound desc = could not find container \"ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a\": container with ID starting with ed154acdbcacb21db6b42e2747c92a86e84cfc19e9529d4637abd1637d12a10a not found: ID does not exist"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.387070 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
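The dnsmasq-dns container above is killed with gracePeriod=10: the API-side DELETE carried a grace period, and the kubelet forwards it to the runtime as the stop timeout before SIGKILL. A client-side delete that would produce the same "Killing container with a grace period" entry, assuming an existing clientset:

    package main

    import (
    	"context"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // deleteWithGrace deletes the pod with an explicit 10-second grace period,
    // matching the gracePeriod=10 the kubelet logs for dnsmasq-dns above.
    func deleteWithGrace(ctx context.Context, cs kubernetes.Interface) error {
    	grace := int64(10)
    	return cs.CoreV1().Pods("openstack").Delete(ctx, "dnsmasq-dns-6bb4fc677f-579hg",
    		metav1.DeleteOptions{GracePeriodSeconds: &grace})
    }

    func main() {}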
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.415087 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.416724 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427136 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427219 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427255 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427300 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427344 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427362 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427377 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427396 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp88z\" (UniqueName: \"kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427416 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427443 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427471 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lvxd\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.427498 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.448974 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.449282 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.450605 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.453209 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.454487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.470094 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.485660 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lvxd\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd\") pod \"cloudkitty-proc-0\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.505509 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-api-0"]
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.546766 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.549758 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-api-config-data"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555470 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555548 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555578 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrfqr\" (UniqueName: \"kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555629 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555647 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tsfv\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555699 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555719 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
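The kube-api-access-* volumes being set up above (kube-api-access-9lvxd, -lrfqr, -5tsfv, -kp88z) are the projected service-account volumes the kubelet materializes for every pod: a bound token plus the cluster CA, normally alongside a downward-API namespace file, omitted in this sketch. The 3607-second expiration is the conventional default, assumed rather than read from this log:

    package main

    import (
    	corev1 "k8s.io/api/core/v1"
    )

    // kubeAPIAccess sketches the projected volume behind one of the
    // kube-api-access-* mounts in the entries above.
    func kubeAPIAccess() corev1.Volume {
    	exp := int64(3607) // assumed default token lifetime
    	return corev1.Volume{
    		Name: "kube-api-access-9lvxd",
    		VolumeSource: corev1.VolumeSource{
    			Projected: &corev1.ProjectedVolumeSource{
    				Sources: []corev1.VolumeProjection{
    					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
    						Path: "token", ExpirationSeconds: &exp}},
    					{ConfigMap: &corev1.ConfigMapProjection{
    						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
    						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
    					}},
    				},
    			},
    		},
    	}
    }

    func main() { _ = kubeAPIAccess() }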
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555743 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp88z\" (UniqueName: \"kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555765 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555789 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.555826 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.556503 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.556889 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.556930 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.556990 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557019 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557049 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557080 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557117 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557178 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.557201 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.558016 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.560805 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.572128 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.574561 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.587861 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.591216 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.592237 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp88z\" (UniqueName: \"kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z\") pod \"ceilometer-0\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " pod="openstack/ceilometer-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.661980 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662017 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662043 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662067 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662092 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662148 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrfqr\" (UniqueName: \"kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662187 
4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662205 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tsfv\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662263 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662313 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662372 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662396 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.662439 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.663410 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.665995 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.666633 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.668315 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.668463 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.668816 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.670364 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.671766 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.673476 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.675624 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.690590 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.691062 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.700710 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrfqr\" (UniqueName: \"kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr\") pod \"dnsmasq-dns-86d9875b97-d5bzm\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.703806 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tsfv\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv\") pod \"cloudkitty-api-0\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.717998 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.816003 4840 generic.go:334] "Generic (PLEG): container finished" podID="40aff238-497c-4504-b6e0-ca86d21d7888" containerID="ef652bdb03df7ff14f1be4d8566f7e12fd5b73eb8e3135e1f8765ee8137a69fc" exitCode=0 Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.816051 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" event={"ID":"40aff238-497c-4504-b6e0-ca86d21d7888","Type":"ContainerDied","Data":"ef652bdb03df7ff14f1be4d8566f7e12fd5b73eb8e3135e1f8765ee8137a69fc"} Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.858466 4840 generic.go:334] "Generic (PLEG): container finished" podID="ada58ef9-8583-4795-8540-093071fb0980" containerID="9df212b5223e982effd724b349e838e5fcb80494502e14e236814caa91ebc1b7" exitCode=0 Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.858508 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerDied","Data":"9df212b5223e982effd724b349e838e5fcb80494502e14e236814caa91ebc1b7"} Dec 09 17:18:27 crc kubenswrapper[4840]: I1209 17:18:27.972502 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:27.998608 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.082958 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-86649f76d6-p6jhc" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.251453 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.253215 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.280953 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle\") pod \"ada58ef9-8583-4795-8540-093071fb0980\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281024 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs\") pod \"ada58ef9-8583-4795-8540-093071fb0980\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281063 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281124 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngc9s\" (UniqueName: \"kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s\") pod \"ada58ef9-8583-4795-8540-093071fb0980\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281208 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281244 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281356 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281397 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data\") pod \"ada58ef9-8583-4795-8540-093071fb0980\" (UID: \"ada58ef9-8583-4795-8540-093071fb0980\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281435 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281464 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom\") pod \"ada58ef9-8583-4795-8540-093071fb0980\" (UID: 
\"ada58ef9-8583-4795-8540-093071fb0980\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.281512 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m57q5\" (UniqueName: \"kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.282522 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs" (OuterVolumeSpecName: "logs") pod "ada58ef9-8583-4795-8540-093071fb0980" (UID: "ada58ef9-8583-4795-8540-093071fb0980"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.294356 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ada58ef9-8583-4795-8540-093071fb0980-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.314847 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s" (OuterVolumeSpecName: "kube-api-access-ngc9s") pod "ada58ef9-8583-4795-8540-093071fb0980" (UID: "ada58ef9-8583-4795-8540-093071fb0980"). InnerVolumeSpecName "kube-api-access-ngc9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.314876 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ada58ef9-8583-4795-8540-093071fb0980" (UID: "ada58ef9-8583-4795-8540-093071fb0980"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.315701 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5" (OuterVolumeSpecName: "kube-api-access-m57q5") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "kube-api-access-m57q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.367647 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ada58ef9-8583-4795-8540-093071fb0980" (UID: "ada58ef9-8583-4795-8540-093071fb0980"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.388731 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.399925 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.401219 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") pod \"40aff238-497c-4504-b6e0-ca86d21d7888\" (UID: \"40aff238-497c-4504-b6e0-ca86d21d7888\") " Dec 09 17:18:28 crc kubenswrapper[4840]: W1209 17:18:28.401393 4840 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/40aff238-497c-4504-b6e0-ca86d21d7888/volumes/kubernetes.io~configmap/dns-svc Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.401426 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405737 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngc9s\" (UniqueName: \"kubernetes.io/projected/ada58ef9-8583-4795-8540-093071fb0980-kube-api-access-ngc9s\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405769 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405781 4840 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405793 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m57q5\" (UniqueName: \"kubernetes.io/projected/40aff238-497c-4504-b6e0-ca86d21d7888-kube-api-access-m57q5\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405805 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.405816 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.409461 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config" (OuterVolumeSpecName: "config") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.422479 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.426828 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "40aff238-497c-4504-b6e0-ca86d21d7888" (UID: "40aff238-497c-4504-b6e0-ca86d21d7888"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.437059 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data" (OuterVolumeSpecName: "config-data") pod "ada58ef9-8583-4795-8540-093071fb0980" (UID: "ada58ef9-8583-4795-8540-093071fb0980"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.508190 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada58ef9-8583-4795-8540-093071fb0980-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.508222 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.508234 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.508248 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40aff238-497c-4504-b6e0-ca86d21d7888-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.625697 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7167f9ca-e4c8-413e-984b-d6b6ddca0e2f" path="/var/lib/kubelet/pods/7167f9ca-e4c8-413e-984b-d6b6ddca0e2f/volumes" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.647679 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:28 crc kubenswrapper[4840]: W1209 17:18:28.660523 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6bb5f91_fdd2_424f_964a_f73a3ee8f8b3.slice/crio-e129fb3e85910b901f2195697a95524cafe1f3d10162bd6308e1b24cf261fbe2 WatchSource:0}: Error finding container e129fb3e85910b901f2195697a95524cafe1f3d10162bd6308e1b24cf261fbe2: Status 404 returned error can't find the container with id e129fb3e85910b901f2195697a95524cafe1f3d10162bd6308e1b24cf261fbe2 Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.693650 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 
17:18:28 crc kubenswrapper[4840]: W1209 17:18:28.719134 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod081ab362_abc8_4132_8452_bfb0fb02f798.slice/crio-8c343610bf598caad1f5c34a8e7c65003330a508609d4c156c06743870e3eed5 WatchSource:0}: Error finding container 8c343610bf598caad1f5c34a8e7c65003330a508609d4c156c06743870e3eed5: Status 404 returned error can't find the container with id 8c343610bf598caad1f5c34a8e7c65003330a508609d4c156c06743870e3eed5 Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.728066 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.750175 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.908553 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"] Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.920583 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22b06eb-f287-43cf-abc6-9cb5580fa71a","Type":"ContainerStarted","Data":"3151de45b73fa3a385540aa5dc5f5ac414b7ab25152a8b4305b7aa118df66650"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.921523 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.945407 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7bfb47b7b8-d27mm" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.945437 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7bfb47b7b8-d27mm" event={"ID":"ada58ef9-8583-4795-8540-093071fb0980","Type":"ContainerDied","Data":"c7c780abbb40a993c43eeda2cbae1b6bc78b84ca1bacedf5fdd12081644e2207"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.945488 4840 scope.go:117] "RemoveContainer" containerID="9df212b5223e982effd724b349e838e5fcb80494502e14e236814caa91ebc1b7" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.963532 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3","Type":"ContainerStarted","Data":"e129fb3e85910b901f2195697a95524cafe1f3d10162bd6308e1b24cf261fbe2"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.976473 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerStarted","Data":"02da8daafddf9b986b99c625d4d83e10495233d407ac32d9e539efe8bc5f8e18"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.976773 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.976753991 podStartE2EDuration="4.976753991s" podCreationTimestamp="2025-12-09 17:18:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:28.945444902 +0000 UTC m=+1294.936555535" watchObservedRunningTime="2025-12-09 17:18:28.976753991 +0000 UTC m=+1294.967864624" Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.992109 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" 
event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerStarted","Data":"8c343610bf598caad1f5c34a8e7c65003330a508609d4c156c06743870e3eed5"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.992947 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" event={"ID":"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f","Type":"ContainerStarted","Data":"a90812096ec6b9e6278eea106c4aeca255da5be8e20df554e8f1a7116b8801f5"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.996236 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" event={"ID":"40aff238-497c-4504-b6e0-ca86d21d7888","Type":"ContainerDied","Data":"d6c3c200d57aa42ff523a8e219efe1db6c5a334e3fec36c736801b81a907d0ea"} Dec 09 17:18:28 crc kubenswrapper[4840]: I1209 17:18:28.996518 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.061392 4840 scope.go:117] "RemoveContainer" containerID="858f5486a14578e49078e05c95e2cdbda49fa8716d61579f0db90c140f1ec3fb" Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.077013 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.091744 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7bfb47b7b8-d27mm"] Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.099560 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"] Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.107054 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-579hg"] Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.143470 4840 scope.go:117] "RemoveContainer" containerID="ef652bdb03df7ff14f1be4d8566f7e12fd5b73eb8e3135e1f8765ee8137a69fc" Dec 09 17:18:29 crc kubenswrapper[4840]: I1209 17:18:29.178851 4840 scope.go:117] "RemoveContainer" containerID="ee8169733abf3bb105b991e818a5a76416463925e051f0cdf4a89cbbd9313345" Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.062484 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerStarted","Data":"0f5e7249dcfb89ee8f3a24f02cfe01779a4821a3de34ce30c130ec1154f6f867"} Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.070391 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerStarted","Data":"1467d208ea32d859df8a05a5b72c263ce5cad379ba7c28998b01347b8a3f7bfb"} Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.070437 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerStarted","Data":"1bfdc0634ff867690a6b1d4e29a43e9a48a9b01a7f995a4d5a45b6075a33d936"} Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.070704 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-api-0" Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.072551 4840 generic.go:334] "Generic (PLEG): container finished" podID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerID="973aaa1c5dfce10cd0c3b0b0d914e087daab3e60f2cdd4ae7bb3cd0b835077fd" exitCode=0 Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.073139 
4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" event={"ID":"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f","Type":"ContainerDied","Data":"973aaa1c5dfce10cd0c3b0b0d914e087daab3e60f2cdd4ae7bb3cd0b835077fd"}
Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.111751 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-api-0" podStartSLOduration=3.111732471 podStartE2EDuration="3.111732471s" podCreationTimestamp="2025-12-09 17:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:30.107886574 +0000 UTC m=+1296.098997207" watchObservedRunningTime="2025-12-09 17:18:30.111732471 +0000 UTC m=+1296.102843104"
Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.585012 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-api-0"]
Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.626675 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" path="/var/lib/kubelet/pods/40aff238-497c-4504-b6e0-ca86d21d7888/volumes"
Dec 09 17:18:30 crc kubenswrapper[4840]: I1209 17:18:30.627661 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ada58ef9-8583-4795-8540-093071fb0980" path="/var/lib/kubelet/pods/ada58ef9-8583-4795-8540-093071fb0980/volumes"
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.099013 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" event={"ID":"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f","Type":"ContainerStarted","Data":"c7bae538a1761ed67fd1d8c5b8a3afebfaca317d475d3bce7cc691a15f4ddb6f"}
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.099347 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.100898 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3","Type":"ContainerStarted","Data":"ba7db2d213d7e83041b2aa7bc8c097206856bf5bea8e6dbffd7bebf4399d5165"}
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.102521 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerStarted","Data":"0f5e044ec0487e2fcdb303ed0257c49403a4e259a6a67144ca376444970adb1d"}
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.119945 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" podStartSLOduration=4.119930026 podStartE2EDuration="4.119930026s" podCreationTimestamp="2025-12-09 17:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:31.116364326 +0000 UTC m=+1297.107474959" watchObservedRunningTime="2025-12-09 17:18:31.119930026 +0000 UTC m=+1297.111040659"
Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.135083 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-proc-0" podStartSLOduration=2.322084578 podStartE2EDuration="4.135059111s" podCreationTimestamp="2025-12-09 17:18:27 +0000 UTC" firstStartedPulling="2025-12-09 17:18:28.693417211 +0000 UTC m=+1294.684527844" lastFinishedPulling="2025-12-09 17:18:30.506391744 +0000 UTC m=+1296.497502377" observedRunningTime="2025-12-09 17:18:31.132114718 +0000 UTC m=+1297.123225361" watchObservedRunningTime="2025-12-09 17:18:31.135059111 +0000 UTC m=+1297.126169744"
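The pod_startup_latency_tracker.go:104 entry for cloudkitty-proc-0 above makes the latency arithmetic explicit: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (17:18:31.135059111 - 17:18:27 = 4.135059111s), and podStartSLOduration additionally subtracts the image-pull window, lastFinishedPulling - firstStartedPulling (17:18:30.506391744 - 17:18:28.693417211 = 1.812974533s), leaving 2.322084578s. For pods whose image needed no pull (cloudkitty-api-0, dnsmasq-dns-86d9875b97-d5bzm) the pull timestamps are the zero time 0001-01-01 and the two durations coincide. A quick back-of-the-envelope check, with the timestamps hand-copied from that entry (Python's datetime keeps only microseconds, so the nanosecond digits truncate):

    from datetime import datetime, timezone

    def ts(s):
        # e.g. "2025-12-09 17:18:30.506391744" -> trim to microseconds for strptime
        return datetime.strptime(s[:26], "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=timezone.utc)

    created    = ts("2025-12-09 17:18:27.000000000")
    first_pull = ts("2025-12-09 17:18:28.693417211")
    last_pull  = ts("2025-12-09 17:18:30.506391744")
    observed   = ts("2025-12-09 17:18:31.135059111")

    e2e = (observed - created).total_seconds()            # ~4.135059, the podStartE2EDuration
    slo = e2e - (last_pull - first_pull).total_seconds()  # ~2.322085, the podStartSLOduration
    print(round(e2e, 6), round(slo, 6))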
m=+1296.497502377" observedRunningTime="2025-12-09 17:18:31.132114718 +0000 UTC m=+1297.123225361" watchObservedRunningTime="2025-12-09 17:18:31.135059111 +0000 UTC m=+1297.126169744" Dec 09 17:18:31 crc kubenswrapper[4840]: I1209 17:18:31.155276 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.115524 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerStarted","Data":"4c4eb706443042eadfdf25d684cd346ed7c2f7f6a27d73715de865277e52f753"} Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.116107 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-api-0" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api-log" containerID="cri-o://1bfdc0634ff867690a6b1d4e29a43e9a48a9b01a7f995a4d5a45b6075a33d936" gracePeriod=30 Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.117621 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-api-0" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api" containerID="cri-o://1467d208ea32d859df8a05a5b72c263ce5cad379ba7c28998b01347b8a3f7bfb" gracePeriod=30 Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.717358 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 09 17:18:32 crc kubenswrapper[4840]: E1209 17:18:32.718335 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="dnsmasq-dns" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.718407 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="dnsmasq-dns" Dec 09 17:18:32 crc kubenswrapper[4840]: E1209 17:18:32.718520 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="init" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.718607 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="init" Dec 09 17:18:32 crc kubenswrapper[4840]: E1209 17:18:32.718701 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.718776 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" Dec 09 17:18:32 crc kubenswrapper[4840]: E1209 17:18:32.718865 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.718922 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.719206 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="dnsmasq-dns" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.719800 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.719885 4840 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="ada58ef9-8583-4795-8540-093071fb0980" containerName="barbican-api-log" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.720753 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.724750 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.725176 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-bvrv9" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.725348 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.730144 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.918666 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.919134 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkrlp\" (UniqueName: \"kubernetes.io/projected/2bd34f5c-7383-4aa1-868d-f7f462d7a708-kube-api-access-kkrlp\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.919176 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:32 crc kubenswrapper[4840]: I1209 17:18:32.923066 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config-secret\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.026537 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkrlp\" (UniqueName: \"kubernetes.io/projected/2bd34f5c-7383-4aa1-868d-f7f462d7a708-kube-api-access-kkrlp\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.026588 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.026619 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config-secret\") pod \"openstackclient\" (UID: 
\"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.026652 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.027391 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.049899 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-openstack-config-secret\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.050431 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd34f5c-7383-4aa1-868d-f7f462d7a708-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.068505 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkrlp\" (UniqueName: \"kubernetes.io/projected/2bd34f5c-7383-4aa1-868d-f7f462d7a708-kube-api-access-kkrlp\") pod \"openstackclient\" (UID: \"2bd34f5c-7383-4aa1-868d-f7f462d7a708\") " pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.106400 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.129206 4840 generic.go:334] "Generic (PLEG): container finished" podID="081ab362-abc8-4132-8452-bfb0fb02f798" containerID="1467d208ea32d859df8a05a5b72c263ce5cad379ba7c28998b01347b8a3f7bfb" exitCode=0 Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.129233 4840 generic.go:334] "Generic (PLEG): container finished" podID="081ab362-abc8-4132-8452-bfb0fb02f798" containerID="1bfdc0634ff867690a6b1d4e29a43e9a48a9b01a7f995a4d5a45b6075a33d936" exitCode=143 Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.129274 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerDied","Data":"1467d208ea32d859df8a05a5b72c263ce5cad379ba7c28998b01347b8a3f7bfb"} Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.129303 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerDied","Data":"1bfdc0634ff867690a6b1d4e29a43e9a48a9b01a7f995a4d5a45b6075a33d936"} Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.129420 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-proc-0" podUID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" containerName="cloudkitty-proc" containerID="cri-o://ba7db2d213d7e83041b2aa7bc8c097206856bf5bea8e6dbffd7bebf4399d5165" gracePeriod=30 Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.161075 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bb4fc677f-579hg" podUID="40aff238-497c-4504-b6e0-ca86d21d7888" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.180:5353: i/o timeout" Dec 09 17:18:33 crc kubenswrapper[4840]: I1209 17:18:33.646852 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 09 17:18:34 crc kubenswrapper[4840]: I1209 17:18:34.036529 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:18:34 crc kubenswrapper[4840]: I1209 17:18:34.036836 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:18:34 crc kubenswrapper[4840]: I1209 17:18:34.140032 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2bd34f5c-7383-4aa1-868d-f7f462d7a708","Type":"ContainerStarted","Data":"935af184ee204265ad54a200c4c84c0fc16eee7d7afb0865e0a98f73ffd6f608"} Dec 09 17:18:34 crc kubenswrapper[4840]: I1209 17:18:34.565532 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 09 17:18:34 crc kubenswrapper[4840]: I1209 17:18:34.670035 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.040186 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.182988 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183152 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tsfv\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183178 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183216 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183275 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183293 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.183330 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data\") pod \"081ab362-abc8-4132-8452-bfb0fb02f798\" (UID: \"081ab362-abc8-4132-8452-bfb0fb02f798\") " Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.197254 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs" (OuterVolumeSpecName: "logs") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.221333 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv" (OuterVolumeSpecName: "kube-api-access-5tsfv") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "kube-api-access-5tsfv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.223422 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.226254 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs" (OuterVolumeSpecName: "certs") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.254815 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerStarted","Data":"22f2e8ef349b9da6f4f7c0024392dd51c427265504a7a8c1221824a4e2efabfe"} Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.257457 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.258180 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts" (OuterVolumeSpecName: "scripts") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.277814 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296557 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296598 4840 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296609 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296627 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tsfv\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-kube-api-access-5tsfv\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296638 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/081ab362-abc8-4132-8452-bfb0fb02f798-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.296648 4840 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/081ab362-abc8-4132-8452-bfb0fb02f798-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.301553 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data" (OuterVolumeSpecName: "config-data") pod "081ab362-abc8-4132-8452-bfb0fb02f798" (UID: "081ab362-abc8-4132-8452-bfb0fb02f798"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.308816 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="cinder-scheduler" containerID="cri-o://cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320" gracePeriod=30 Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.310207 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="probe" containerID="cri-o://c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25" gracePeriod=30 Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.310592 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.310618 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"081ab362-abc8-4132-8452-bfb0fb02f798","Type":"ContainerDied","Data":"8c343610bf598caad1f5c34a8e7c65003330a508609d4c156c06743870e3eed5"} Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.319890 4840 scope.go:117] "RemoveContainer" containerID="1467d208ea32d859df8a05a5b72c263ce5cad379ba7c28998b01347b8a3f7bfb" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.361465 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7652216320000003 podStartE2EDuration="8.361444502s" podCreationTimestamp="2025-12-09 17:18:27 +0000 UTC" firstStartedPulling="2025-12-09 17:18:28.742331134 +0000 UTC m=+1294.733441777" lastFinishedPulling="2025-12-09 17:18:34.338554014 +0000 UTC m=+1300.329664647" observedRunningTime="2025-12-09 17:18:35.305201074 +0000 UTC m=+1301.296311707" watchObservedRunningTime="2025-12-09 17:18:35.361444502 +0000 UTC m=+1301.352555135" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.403418 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/081ab362-abc8-4132-8452-bfb0fb02f798-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.431096 4840 scope.go:117] "RemoveContainer" containerID="1bfdc0634ff867690a6b1d4e29a43e9a48a9b01a7f995a4d5a45b6075a33d936" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.435246 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:35 crc kubenswrapper[4840]: E1209 17:18:35.445753 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice\": RecentStats: unable to find data in memory cache]" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.471710 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.497714 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:35 crc kubenswrapper[4840]: E1209 17:18:35.498320 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api-log" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.498346 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api-log" Dec 09 17:18:35 crc kubenswrapper[4840]: E1209 17:18:35.498361 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.498369 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.498628 4840 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api-log" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.498650 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" containerName="cloudkitty-api" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.502013 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.509034 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-public-svc" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.509279 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-api-config-data" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.509465 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-internal-svc" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.528525 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611406 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611460 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611477 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ln22\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-kube-api-access-6ln22\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611500 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611525 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c9d492-0c0b-4d85-9235-e7ede2df5752-logs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611550 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-scripts\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611611 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611650 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.611682 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.712930 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713281 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713335 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713395 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713468 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ln22\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-kube-api-access-6ln22\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713504 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " 
pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713536 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c9d492-0c0b-4d85-9235-e7ede2df5752-logs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.713559 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-scripts\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.719666 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c9d492-0c0b-4d85-9235-e7ede2df5752-logs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.724384 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.725076 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.725406 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.727104 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.727717 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-scripts\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.728844 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.731943 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c9d492-0c0b-4d85-9235-e7ede2df5752-config-data\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0" Dec 09 17:18:35 crc 
Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.740420 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ln22\" (UniqueName: \"kubernetes.io/projected/35c9d492-0c0b-4d85-9235-e7ede2df5752-kube-api-access-6ln22\") pod \"cloudkitty-api-0\" (UID: \"35c9d492-0c0b-4d85-9235-e7ede2df5752\") " pod="openstack/cloudkitty-api-0"
Dec 09 17:18:35 crc kubenswrapper[4840]: I1209 17:18:35.836814 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0"
Dec 09 17:18:36 crc kubenswrapper[4840]: I1209 17:18:36.327409 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"]
Dec 09 17:18:36 crc kubenswrapper[4840]: I1209 17:18:36.620207 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="081ab362-abc8-4132-8452-bfb0fb02f798" path="/var/lib/kubelet/pods/081ab362-abc8-4132-8452-bfb0fb02f798/volumes"
Dec 09 17:18:37 crc kubenswrapper[4840]: I1209 17:18:37.343249 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"35c9d492-0c0b-4d85-9235-e7ede2df5752","Type":"ContainerStarted","Data":"90889b439f74207c9829268371dafc3af3a4e3bbe5c684eefc9e5e0b690ac07e"}
Dec 09 17:18:37 crc kubenswrapper[4840]: I1209 17:18:37.975455 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm"
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.065796 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"]
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.066081 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="dnsmasq-dns" containerID="cri-o://369b3823746f5e78bbe3243adbb133079513a212da3e0c49b02c7d6dd605a323" gracePeriod=10
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.401218 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"35c9d492-0c0b-4d85-9235-e7ede2df5752","Type":"ContainerStarted","Data":"6bcdb14df25fdb5fa16501342ddba8f04fdc2c8b78892d3b4df48af2ef764b83"}
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.401460 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"35c9d492-0c0b-4d85-9235-e7ede2df5752","Type":"ContainerStarted","Data":"68dad3a948d791feebece341e4c63dcc10195203fa5ef4796ca558f9e146de2f"}
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.407087 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-api-0"
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.435319 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-api-0" podStartSLOduration=3.435295819 podStartE2EDuration="3.435295819s" podCreationTimestamp="2025-12-09 17:18:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:38.426622156 +0000 UTC m=+1304.417732789" watchObservedRunningTime="2025-12-09 17:18:38.435295819 +0000 UTC m=+1304.426406462"
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.439470 4840 generic.go:334] "Generic (PLEG): container finished" podID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerID="c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25" exitCode=0
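
The "Killing container with a grace period" entries above (gracePeriod=30 for cinder-scheduler, gracePeriod=10 for dnsmasq-dns) follow the usual pattern: signal the process to terminate, and force-kill only if it is still running when the grace period expires. A generic Go sketch of that pattern, an illustration rather than kubelet/CRI-O code:

    package main

    import (
    	"os/exec"
    	"syscall"
    	"time"
    )

    // killWithGrace sends SIGTERM, waits up to the grace period for the
    // process to exit on its own, then falls back to SIGKILL.
    func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
    	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
    		return err
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()
    	select {
    	case err := <-done:
    		return err // exited within the grace period
    	case <-time.After(grace):
    		return cmd.Process.Kill() // deadline passed: force kill
    	}
    }

    func main() {
    	cmd := exec.Command("sleep", "60")
    	_ = cmd.Start()
    	_ = killWithGrace(cmd, 10*time.Second) // cf. gracePeriod=10 above
    }
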
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.439571 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerDied","Data":"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25"}
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.488116 4840 generic.go:334] "Generic (PLEG): container finished" podID="eae88523-ef33-475f-9d10-ad400eb13260" containerID="369b3823746f5e78bbe3243adbb133079513a212da3e0c49b02c7d6dd605a323" exitCode=0
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.488168 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" event={"ID":"eae88523-ef33-475f-9d10-ad400eb13260","Type":"ContainerDied","Data":"369b3823746f5e78bbe3243adbb133079513a212da3e0c49b02c7d6dd605a323"}
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.706326 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h"
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.807775 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv4z6\" (UniqueName: \"kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.807832 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.807977 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.808014 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.808034 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.808111 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc\") pod \"eae88523-ef33-475f-9d10-ad400eb13260\" (UID: \"eae88523-ef33-475f-9d10-ad400eb13260\") "
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.828116 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6" (OuterVolumeSpecName: "kube-api-access-jv4z6") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "kube-api-access-jv4z6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.913934 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv4z6\" (UniqueName: \"kubernetes.io/projected/eae88523-ef33-475f-9d10-ad400eb13260-kube-api-access-jv4z6\") on node \"crc\" DevicePath \"\""
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.928463 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.930766 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.962916 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:18:38 crc kubenswrapper[4840]: I1209 17:18:38.967454 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config" (OuterVolumeSpecName: "config") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.001479 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "eae88523-ef33-475f-9d10-ad400eb13260" (UID: "eae88523-ef33-475f-9d10-ad400eb13260"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.020216 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.020250 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.020262 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.020616 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.020633 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eae88523-ef33-475f-9d10-ad400eb13260-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.200523 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.267159 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="b22b06eb-f287-43cf-abc6-9cb5580fa71a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.182:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330102 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330185 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dlbz\" (UniqueName: \"kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330234 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330289 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330319 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.330386 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id\") pod \"827d00c4-cdf4-43ad-bb9c-746c36adb391\" (UID: \"827d00c4-cdf4-43ad-bb9c-746c36adb391\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.331126 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.335218 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts" (OuterVolumeSpecName: "scripts") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.337929 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.351476 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz" (OuterVolumeSpecName: "kube-api-access-7dlbz") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "kube-api-access-7dlbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.427228 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.435220 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.435403 4840 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/827d00c4-cdf4-43ad-bb9c-746c36adb391-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.435461 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.435542 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dlbz\" (UniqueName: \"kubernetes.io/projected/827d00c4-cdf4-43ad-bb9c-746c36adb391-kube-api-access-7dlbz\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.435607 4840 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.517600 4840 generic.go:334] "Generic (PLEG): container finished" podID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerID="cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320" exitCode=0 Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.517663 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.517675 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerDied","Data":"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320"} Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.517706 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"827d00c4-cdf4-43ad-bb9c-746c36adb391","Type":"ContainerDied","Data":"1734c2e85b028b51b515f23f597097ce8cd8515077005dcca81c992c0e6812aa"} Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.517724 4840 scope.go:117] "RemoveContainer" containerID="c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.545394 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" event={"ID":"eae88523-ef33-475f-9d10-ad400eb13260","Type":"ContainerDied","Data":"115c61e6da2131fa7a33b5881b1366cc710ccefe7e73367b229cc501cb2c794b"} Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.545515 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-k2r4h" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.546241 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data" (OuterVolumeSpecName: "config-data") pod "827d00c4-cdf4-43ad-bb9c-746c36adb391" (UID: "827d00c4-cdf4-43ad-bb9c-746c36adb391"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.557770 4840 generic.go:334] "Generic (PLEG): container finished" podID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" containerID="ba7db2d213d7e83041b2aa7bc8c097206856bf5bea8e6dbffd7bebf4399d5165" exitCode=0 Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.558684 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3","Type":"ContainerDied","Data":"ba7db2d213d7e83041b2aa7bc8c097206856bf5bea8e6dbffd7bebf4399d5165"} Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.586139 4840 scope.go:117] "RemoveContainer" containerID="cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.646025 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"] Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.651548 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827d00c4-cdf4-43ad-bb9c-746c36adb391-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.669480 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-k2r4h"] Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.712155 4840 scope.go:117] "RemoveContainer" containerID="c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25" Dec 09 17:18:39 crc kubenswrapper[4840]: E1209 17:18:39.720646 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25\": container with ID starting with c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25 not found: ID does not exist" containerID="c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.720702 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25"} err="failed to get container status \"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25\": rpc error: code = NotFound desc = could not find container \"c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25\": container with ID starting with c31f84f8f194b89c6fdfa96c7a4f7af6eabcc5720df709382aa63ba4a7685d25 not found: ID does not exist" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.720735 4840 scope.go:117] "RemoveContainer" containerID="cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320" Dec 09 17:18:39 crc kubenswrapper[4840]: E1209 17:18:39.730107 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320\": container with ID starting with cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320 not found: ID does not exist" containerID="cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.730150 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320"} err="failed to get container status 
\"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320\": rpc error: code = NotFound desc = could not find container \"cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320\": container with ID starting with cca6d57ac8e6d137f404aa247924c808929bff8db0d2643a71580cacf060b320 not found: ID does not exist" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.730175 4840 scope.go:117] "RemoveContainer" containerID="369b3823746f5e78bbe3243adbb133079513a212da3e0c49b02c7d6dd605a323" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.779157 4840 scope.go:117] "RemoveContainer" containerID="54ebec986f16a325cdd6a6c3c0c59bcd15e0ecd2927d667e60f736936e0ea2b5" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.799654 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.936306 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.962918 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lvxd\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.963005 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.963262 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.963295 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.963326 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.963346 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs\") pod \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\" (UID: \"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3\") " Dec 09 17:18:39 crc kubenswrapper[4840]: I1209 17:18:39.977769 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.001819 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd" (OuterVolumeSpecName: "kube-api-access-9lvxd") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "kube-api-access-9lvxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.003159 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts" (OuterVolumeSpecName: "scripts") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.013176 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs" (OuterVolumeSpecName: "certs") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.015125 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.028181 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.060013 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.065845 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.065875 4840 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.065887 4840 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.065896 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lvxd\" (UniqueName: \"kubernetes.io/projected/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-kube-api-access-9lvxd\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.065906 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079078 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: E1209 17:18:40.079449 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="cinder-scheduler" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079465 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="cinder-scheduler" Dec 09 17:18:40 crc kubenswrapper[4840]: E1209 17:18:40.079476 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="dnsmasq-dns" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079484 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="dnsmasq-dns" Dec 09 17:18:40 crc kubenswrapper[4840]: E1209 17:18:40.079497 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="probe" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079503 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="probe" Dec 09 17:18:40 crc kubenswrapper[4840]: E1209 17:18:40.079511 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" containerName="cloudkitty-proc" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079516 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" containerName="cloudkitty-proc" Dec 09 17:18:40 crc kubenswrapper[4840]: E1209 17:18:40.079544 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="init" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079550 4840 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="init" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079727 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" containerName="cloudkitty-proc" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079741 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="eae88523-ef33-475f-9d10-ad400eb13260" containerName="dnsmasq-dns" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079753 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="probe" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.079766 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" containerName="cinder-scheduler" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.081091 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.083791 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.108247 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.121143 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data" (OuterVolumeSpecName: "config-data") pod "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" (UID: "d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.170449 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274620 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/308bf122-33f6-46ac-bcda-722eacff6427-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274726 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb8j2\" (UniqueName: \"kubernetes.io/projected/308bf122-33f6-46ac-bcda-722eacff6427-kube-api-access-zb8j2\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274765 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274802 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-scripts\") pod \"cinder-scheduler-0\" (UID: 
\"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274844 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.274861 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376091 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb8j2\" (UniqueName: \"kubernetes.io/projected/308bf122-33f6-46ac-bcda-722eacff6427-kube-api-access-zb8j2\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376158 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376197 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-scripts\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376237 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376259 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376305 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/308bf122-33f6-46ac-bcda-722eacff6427-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.376407 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/308bf122-33f6-46ac-bcda-722eacff6427-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.387692 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-scripts\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.388488 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.389162 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.395288 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308bf122-33f6-46ac-bcda-722eacff6427-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.399714 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb8j2\" (UniqueName: \"kubernetes.io/projected/308bf122-33f6-46ac-bcda-722eacff6427-kube-api-access-zb8j2\") pod \"cinder-scheduler-0\" (UID: \"308bf122-33f6-46ac-bcda-722eacff6427\") " pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.497401 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.597652 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3","Type":"ContainerDied","Data":"e129fb3e85910b901f2195697a95524cafe1f3d10162bd6308e1b24cf261fbe2"} Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.597932 4840 scope.go:117] "RemoveContainer" containerID="ba7db2d213d7e83041b2aa7bc8c097206856bf5bea8e6dbffd7bebf4399d5165" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.598098 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.657978 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="827d00c4-cdf4-43ad-bb9c-746c36adb391" path="/var/lib/kubelet/pods/827d00c4-cdf4-43ad-bb9c-746c36adb391/volumes" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.662237 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eae88523-ef33-475f-9d10-ad400eb13260" path="/var/lib/kubelet/pods/eae88523-ef33-475f-9d10-ad400eb13260/volumes" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.691052 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.719399 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.738037 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.739760 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.763674 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.764601 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-proc-config-data" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901689 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-scripts\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901768 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901796 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-certs\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901859 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb6nh\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-kube-api-access-bb6nh\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901879 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:40 crc kubenswrapper[4840]: I1209 17:18:40.901935 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004504 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-scripts\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004749 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004771 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-certs\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004822 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb6nh\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-kube-api-access-bb6nh\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004840 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.004888 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.010694 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-certs\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.012172 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-scripts\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.012671 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: 
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.013454 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.035438 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3cbe862b-8e49-4124-88e1-1f32cd429250-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.042642 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb6nh\" (UniqueName: \"kubernetes.io/projected/3cbe862b-8e49-4124-88e1-1f32cd429250-kube-api-access-bb6nh\") pod \"cloudkitty-proc-0\" (UID: \"3cbe862b-8e49-4124-88e1-1f32cd429250\") " pod="openstack/cloudkitty-proc-0"
Dec 09 17:18:41 crc kubenswrapper[4840]: W1209 17:18:41.060186 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod308bf122_33f6_46ac_bcda_722eacff6427.slice/crio-6ce67f287c9e7c16e8745104b94cfae9dc9f2cb02867b4303d822e66530c2b02 WatchSource:0}: Error finding container 6ce67f287c9e7c16e8745104b94cfae9dc9f2cb02867b4303d822e66530c2b02: Status 404 returned error can't find the container with id 6ce67f287c9e7c16e8745104b94cfae9dc9f2cb02867b4303d822e66530c2b02
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.070478 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.079422 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5df4f579dd-7gd8n"
Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.093693 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0"
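With that, every cloudkitty-proc-0 volume has gone through the same three-step ladder visible in the source locations: VerifyControllerAttachedVolume (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), MountVolume.SetUp succeeded (operation_generator.go:637). A toy loop illustrating only that ordering; the real logic lives in the kubelet volume manager's reconciler and is far richer:

package main

import "fmt"

type state int

const (
	unattached state = iota // zero value: nothing verified yet
	attached
	mounted
)

// reconcile advances each desired volume one step per pass, mirroring the
// attach-verify -> mount -> setup progression in the log (illustration only).
func reconcile(desired []string, actual map[string]state) {
	for _, vol := range desired {
		switch actual[vol] {
		case unattached:
			fmt.Printf("VerifyControllerAttachedVolume started for volume %q\n", vol)
			actual[vol] = attached
		case attached:
			fmt.Printf("MountVolume started for volume %q\n", vol)
			fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", vol)
			actual[vol] = mounted
		}
	}
}

func main() {
	vols := []string{"scripts", "config-data", "combined-ca-bundle"}
	got := map[string]state{}
	reconcile(vols, got) // first pass: verify attach
	reconcile(vols, got) // second pass: mount and set up
}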
Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.087699 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5df4f579dd-7gd8n" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.590158 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 09 17:18:41 crc kubenswrapper[4840]: W1209 17:18:41.604245 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3cbe862b_8e49_4124_88e1_1f32cd429250.slice/crio-5e1a868e61b4db6e29a8799ba5275504de92f1ed4fc79f6babc56aa2f8a0ac6d WatchSource:0}: Error finding container 5e1a868e61b4db6e29a8799ba5275504de92f1ed4fc79f6babc56aa2f8a0ac6d: Status 404 returned error can't find the container with id 5e1a868e61b4db6e29a8799ba5275504de92f1ed4fc79f6babc56aa2f8a0ac6d Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.647346 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"3cbe862b-8e49-4124-88e1-1f32cd429250","Type":"ContainerStarted","Data":"5e1a868e61b4db6e29a8799ba5275504de92f1ed4fc79f6babc56aa2f8a0ac6d"} Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.655187 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"308bf122-33f6-46ac-bcda-722eacff6427","Type":"ContainerStarted","Data":"6ce67f287c9e7c16e8745104b94cfae9dc9f2cb02867b4303d822e66530c2b02"} Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.799031 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7d5bcffc7c-n8wfr"] Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.800713 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.804400 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.804708 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.804758 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.817443 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7d5bcffc7c-n8wfr"] Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928255 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-config-data\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928629 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-internal-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928691 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-log-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928713 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-public-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928742 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-combined-ca-bundle\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928762 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ftfr\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-kube-api-access-7ftfr\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928794 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-etc-swift\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " 
pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:41 crc kubenswrapper[4840]: I1209 17:18:41.928822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-run-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.030704 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-config-data\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.030861 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-internal-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.030942 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-log-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.030980 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-public-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.031011 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-combined-ca-bundle\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.031060 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ftfr\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-kube-api-access-7ftfr\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.031086 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-etc-swift\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.031117 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-run-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.032234 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-run-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.035546 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-log-httpd\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.036500 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-internal-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.037135 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-public-tls-certs\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.037941 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-config-data\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.038472 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-etc-swift\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.038789 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-combined-ca-bundle\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.049087 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ftfr\" (UniqueName: \"kubernetes.io/projected/e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7-kube-api-access-7ftfr\") pod \"swift-proxy-7d5bcffc7c-n8wfr\" (UID: \"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7\") " pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.119229 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Need to start a new one" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.621751 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3" path="/var/lib/kubelet/pods/d6bb5f91-fdd2-424f-964a-f73a3ee8f8b3/volumes" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.678824 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"3cbe862b-8e49-4124-88e1-1f32cd429250","Type":"ContainerStarted","Data":"52c6da727dbb6c986b79017b85959b1d102108e727e9522d5d9b63955e33af71"} Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.702165 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"308bf122-33f6-46ac-bcda-722eacff6427","Type":"ContainerStarted","Data":"a3fb7768b32b0039c6d54c7638597919660d4c295b455d65300641d678553483"} Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.702206 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"308bf122-33f6-46ac-bcda-722eacff6427","Type":"ContainerStarted","Data":"09c38e7910b275d0b7100d0979b1b5a22aa6f65e38af5d4dec42c19a4f8ddc51"} Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.704176 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-proc-0" podStartSLOduration=2.704166752 podStartE2EDuration="2.704166752s" podCreationTimestamp="2025-12-09 17:18:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:42.698230945 +0000 UTC m=+1308.689341588" watchObservedRunningTime="2025-12-09 17:18:42.704166752 +0000 UTC m=+1308.695277385" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.722957 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.7229329780000002 podStartE2EDuration="3.722932978s" podCreationTimestamp="2025-12-09 17:18:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:42.717767113 +0000 UTC m=+1308.708877756" watchObservedRunningTime="2025-12-09 17:18:42.722932978 +0000 UTC m=+1308.714043611" Dec 09 17:18:42 crc kubenswrapper[4840]: I1209 17:18:42.828581 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7d5bcffc7c-n8wfr"] Dec 09 17:18:45 crc kubenswrapper[4840]: I1209 17:18:45.497572 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 09 17:18:45 crc kubenswrapper[4840]: E1209 17:18:45.737263 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice/crio-3cba781eb1a23b331b4482d7a007e9bda24e64b87799ef7b46741feabbf661bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f6c52e_2e47_442e_80fe_a03f7b9582fe.slice\": RecentStats: unable to find data in memory cache]" Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.888651 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.889204 4840 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/ceilometer-0" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-central-agent" containerID="cri-o://0f5e7249dcfb89ee8f3a24f02cfe01779a4821a3de34ce30c130ec1154f6f867" gracePeriod=30 Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.889350 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="proxy-httpd" containerID="cri-o://22f2e8ef349b9da6f4f7c0024392dd51c427265504a7a8c1221824a4e2efabfe" gracePeriod=30 Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.889396 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="sg-core" containerID="cri-o://4c4eb706443042eadfdf25d684cd346ed7c2f7f6a27d73715de865277e52f753" gracePeriod=30 Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.889425 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-notification-agent" containerID="cri-o://0f5e044ec0487e2fcdb303ed0257c49403a4e259a6a67144ca376444970adb1d" gracePeriod=30 Dec 09 17:18:46 crc kubenswrapper[4840]: I1209 17:18:46.896260 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.184:3000/\": EOF" Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760774 4840 generic.go:334] "Generic (PLEG): container finished" podID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerID="22f2e8ef349b9da6f4f7c0024392dd51c427265504a7a8c1221824a4e2efabfe" exitCode=0 Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760811 4840 generic.go:334] "Generic (PLEG): container finished" podID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerID="4c4eb706443042eadfdf25d684cd346ed7c2f7f6a27d73715de865277e52f753" exitCode=2 Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760820 4840 generic.go:334] "Generic (PLEG): container finished" podID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerID="0f5e7249dcfb89ee8f3a24f02cfe01779a4821a3de34ce30c130ec1154f6f867" exitCode=0 Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760841 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerDied","Data":"22f2e8ef349b9da6f4f7c0024392dd51c427265504a7a8c1221824a4e2efabfe"} Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760865 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerDied","Data":"4c4eb706443042eadfdf25d684cd346ed7c2f7f6a27d73715de865277e52f753"} Dec 09 17:18:47 crc kubenswrapper[4840]: I1209 17:18:47.760875 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerDied","Data":"0f5e7249dcfb89ee8f3a24f02cfe01779a4821a3de34ce30c130ec1154f6f867"} Dec 09 17:18:49 crc kubenswrapper[4840]: I1209 17:18:49.790139 4840 generic.go:334] "Generic (PLEG): container finished" podID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerID="0f5e044ec0487e2fcdb303ed0257c49403a4e259a6a67144ca376444970adb1d" exitCode=0 Dec 09 17:18:49 crc kubenswrapper[4840]: I1209 17:18:49.790211 4840 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerDied","Data":"0f5e044ec0487e2fcdb303ed0257c49403a4e259a6a67144ca376444970adb1d"} Dec 09 17:18:50 crc kubenswrapper[4840]: W1209 17:18:50.301756 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6dbbeb6_1d77_472c_928f_c3abcaa2a8e7.slice/crio-5992a985dc62289fe86464218d4aa45537e01b2919a031e3e63d71a19378f682 WatchSource:0}: Error finding container 5992a985dc62289fe86464218d4aa45537e01b2919a031e3e63d71a19378f682: Status 404 returned error can't find the container with id 5992a985dc62289fe86464218d4aa45537e01b2919a031e3e63d71a19378f682 Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.726117 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.764752 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.809501 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e0cfccf-e405-4bf7-84be-ac81e2547a3e","Type":"ContainerDied","Data":"02da8daafddf9b986b99c625d4d83e10495233d407ac32d9e539efe8bc5f8e18"} Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.809504 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.809557 4840 scope.go:117] "RemoveContainer" containerID="22f2e8ef349b9da6f4f7c0024392dd51c427265504a7a8c1221824a4e2efabfe" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.824368 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" event={"ID":"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7","Type":"ContainerStarted","Data":"5992a985dc62289fe86464218d4aa45537e01b2919a031e3e63d71a19378f682"} Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.869156 4840 scope.go:117] "RemoveContainer" containerID="4c4eb706443042eadfdf25d684cd346ed7c2f7f6a27d73715de865277e52f753" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.897543 4840 scope.go:117] "RemoveContainer" containerID="0f5e044ec0487e2fcdb303ed0257c49403a4e259a6a67144ca376444970adb1d" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.920220 4840 scope.go:117] "RemoveContainer" containerID="0f5e7249dcfb89ee8f3a24f02cfe01779a4821a3de34ce30c130ec1154f6f867" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.930484 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.930655 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.930756 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.930884 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.931083 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.931285 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.931417 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp88z\" (UniqueName: \"kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z\") pod \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\" (UID: \"1e0cfccf-e405-4bf7-84be-ac81e2547a3e\") " Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.931568 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.931856 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.932300 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.932400 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.934472 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts" (OuterVolumeSpecName: "scripts") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.938173 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z" (OuterVolumeSpecName: "kube-api-access-kp88z") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "kube-api-access-kp88z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:18:50 crc kubenswrapper[4840]: I1209 17:18:50.955516 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.020616 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.034784 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.034819 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp88z\" (UniqueName: \"kubernetes.io/projected/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-kube-api-access-kp88z\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.034830 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.034840 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.042256 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data" (OuterVolumeSpecName: "config-data") pod "1e0cfccf-e405-4bf7-84be-ac81e2547a3e" (UID: "1e0cfccf-e405-4bf7-84be-ac81e2547a3e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.136369 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e0cfccf-e405-4bf7-84be-ac81e2547a3e-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.148500 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.157680 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.209741 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:51 crc kubenswrapper[4840]: E1209 17:18:51.211314 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="proxy-httpd" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211339 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="proxy-httpd" Dec 09 17:18:51 crc kubenswrapper[4840]: E1209 17:18:51.211369 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-notification-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211379 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-notification-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: E1209 17:18:51.211405 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="sg-core" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211414 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="sg-core" Dec 09 17:18:51 crc kubenswrapper[4840]: E1209 17:18:51.211423 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-central-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211431 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-central-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211672 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-central-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211695 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="sg-core" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211708 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="proxy-httpd" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.211806 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" containerName="ceilometer-notification-agent" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.215205 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.216671 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.217247 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.221045 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.339282 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.339791 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.339944 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.340081 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.340215 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.340336 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.340464 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55jgj\" (UniqueName: \"kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442187 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442271 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442330 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442359 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442430 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442482 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.442533 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55jgj\" (UniqueName: \"kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.443470 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.443807 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.448386 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.448767 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.449664 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.452266 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.467020 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55jgj\" (UniqueName: \"kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj\") pod \"ceilometer-0\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") " pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.534854 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.840249 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2bd34f5c-7383-4aa1-868d-f7f462d7a708","Type":"ContainerStarted","Data":"d776d1dc30a45bce3f59c546de509094f4445e70a7e6ad800aaddec172d16d2f"} Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.841821 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" event={"ID":"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7","Type":"ContainerStarted","Data":"f0726120c111f5656d53f671cd811cf045a0b4f7d4b2f0a7126ecbe39c330df7"} Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.841860 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" event={"ID":"e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7","Type":"ContainerStarted","Data":"14f480942878befc6c3dc8402dbe7cba1c9be1702df0d32d7e7c5ae7b843a755"} Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.842019 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.861611 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.153257601 podStartE2EDuration="19.861594092s" podCreationTimestamp="2025-12-09 17:18:32 +0000 UTC" firstStartedPulling="2025-12-09 17:18:33.663856366 +0000 UTC m=+1299.654966999" lastFinishedPulling="2025-12-09 17:18:50.372192857 +0000 UTC m=+1316.363303490" observedRunningTime="2025-12-09 17:18:51.857299821 +0000 UTC m=+1317.848410494" watchObservedRunningTime="2025-12-09 17:18:51.861594092 +0000 UTC m=+1317.852704725" Dec 09 17:18:51 crc kubenswrapper[4840]: I1209 17:18:51.891268 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" podStartSLOduration=10.891246414 podStartE2EDuration="10.891246414s" podCreationTimestamp="2025-12-09 17:18:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:18:51.885472972 +0000 UTC m=+1317.876583625" watchObservedRunningTime="2025-12-09 17:18:51.891246414 +0000 UTC m=+1317.882357057" Dec 09 17:18:52 crc kubenswrapper[4840]: I1209 17:18:52.003647 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 
Dec 09 17:18:52 crc kubenswrapper[4840]: W1209 17:18:52.004071 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ed8ebe6_4a23_4996_916d_16044c052e9c.slice/crio-dde26d8413f434416eb0dc2f4baa347d8efb12e657f6f9325b6a94036e9bc517 WatchSource:0}: Error finding container dde26d8413f434416eb0dc2f4baa347d8efb12e657f6f9325b6a94036e9bc517: Status 404 returned error can't find the container with id dde26d8413f434416eb0dc2f4baa347d8efb12e657f6f9325b6a94036e9bc517
Dec 09 17:18:52 crc kubenswrapper[4840]: I1209 17:18:52.120680 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:52 crc kubenswrapper[4840]: I1209 17:18:52.624681 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e0cfccf-e405-4bf7-84be-ac81e2547a3e" path="/var/lib/kubelet/pods/1e0cfccf-e405-4bf7-84be-ac81e2547a3e/volumes"
Dec 09 17:18:52 crc kubenswrapper[4840]: I1209 17:18:52.859680 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerStarted","Data":"f265a9584e6d81211bd62e00de6fe1a7a1b441d63e814ace8a392ed99824fb45"}
Dec 09 17:18:52 crc kubenswrapper[4840]: I1209 17:18:52.859799 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerStarted","Data":"dde26d8413f434416eb0dc2f4baa347d8efb12e657f6f9325b6a94036e9bc517"}
Dec 09 17:18:53 crc kubenswrapper[4840]: I1209 17:18:53.871775 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerStarted","Data":"c5ea309a971dfbbfca27ca31e6e2a5f35628e0d75e71e4ec34a2f07a6af82b50"}
Dec 09 17:18:54 crc kubenswrapper[4840]: I1209 17:18:54.883522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerStarted","Data":"2a25da6a2339ad2a6b6dc481d3fdb0b75edfeb3920858320efc2c1c006f0ace3"}
Dec 09 17:18:56 crc kubenswrapper[4840]: I1209 17:18:56.912427 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerStarted","Data":"b613078e2558a0ebf66d67bd3581db6b7e0a402395f26725098689cc50039eb4"}
Dec 09 17:18:56 crc kubenswrapper[4840]: I1209 17:18:56.913082 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 09 17:18:56 crc kubenswrapper[4840]: I1209 17:18:56.934431 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.749388768 podStartE2EDuration="5.934411709s" podCreationTimestamp="2025-12-09 17:18:51 +0000 UTC" firstStartedPulling="2025-12-09 17:18:52.006905098 +0000 UTC m=+1317.998015731" lastFinishedPulling="2025-12-09 17:18:56.191928039 +0000 UTC m=+1322.183038672" observedRunningTime="2025-12-09 17:18:56.933664998 +0000 UTC m=+1322.924775651" watchObservedRunningTime="2025-12-09 17:18:56.934411709 +0000 UTC m=+1322.925522342"
Dec 09 17:18:57 crc kubenswrapper[4840]: I1209 17:18:57.132925 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
Dec 09 17:18:57 crc kubenswrapper[4840]: I1209 17:18:57.132996 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr"
pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.260121 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.261354 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-central-agent" containerID="cri-o://f265a9584e6d81211bd62e00de6fe1a7a1b441d63e814ace8a392ed99824fb45" gracePeriod=30 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.261480 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="proxy-httpd" containerID="cri-o://b613078e2558a0ebf66d67bd3581db6b7e0a402395f26725098689cc50039eb4" gracePeriod=30 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.261426 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-notification-agent" containerID="cri-o://c5ea309a971dfbbfca27ca31e6e2a5f35628e0d75e71e4ec34a2f07a6af82b50" gracePeriod=30 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.261394 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="sg-core" containerID="cri-o://2a25da6a2339ad2a6b6dc481d3fdb0b75edfeb3920858320efc2c1c006f0ace3" gracePeriod=30 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.942849 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerID="b613078e2558a0ebf66d67bd3581db6b7e0a402395f26725098689cc50039eb4" exitCode=0 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943315 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerID="2a25da6a2339ad2a6b6dc481d3fdb0b75edfeb3920858320efc2c1c006f0ace3" exitCode=2 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943326 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerID="c5ea309a971dfbbfca27ca31e6e2a5f35628e0d75e71e4ec34a2f07a6af82b50" exitCode=0 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943333 4840 generic.go:334] "Generic (PLEG): container finished" podID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerID="f265a9584e6d81211bd62e00de6fe1a7a1b441d63e814ace8a392ed99824fb45" exitCode=0 Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943351 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerDied","Data":"b613078e2558a0ebf66d67bd3581db6b7e0a402395f26725098689cc50039eb4"} Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943375 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerDied","Data":"2a25da6a2339ad2a6b6dc481d3fdb0b75edfeb3920858320efc2c1c006f0ace3"} Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943385 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerDied","Data":"c5ea309a971dfbbfca27ca31e6e2a5f35628e0d75e71e4ec34a2f07a6af82b50"} Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 
Dec 09 17:18:59 crc kubenswrapper[4840]: I1209 17:18:59.943394 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerDied","Data":"f265a9584e6d81211bd62e00de6fe1a7a1b441d63e814ace8a392ed99824fb45"}
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.048821 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147500 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147645 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147680 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147825 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147915 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.147992 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.148052 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55jgj\" (UniqueName: \"kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj\") pod \"6ed8ebe6-4a23-4996-916d-16044c052e9c\" (UID: \"6ed8ebe6-4a23-4996-916d-16044c052e9c\") "
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.148220 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.148464 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.148794 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.148816 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6ed8ebe6-4a23-4996-916d-16044c052e9c-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.152871 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj" (OuterVolumeSpecName: "kube-api-access-55jgj") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "kube-api-access-55jgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.154940 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts" (OuterVolumeSpecName: "scripts") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.180136 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.240888 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.251141 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.251176 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.251188 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.251202 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55jgj\" (UniqueName: \"kubernetes.io/projected/6ed8ebe6-4a23-4996-916d-16044c052e9c-kube-api-access-55jgj\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.268890 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data" (OuterVolumeSpecName: "config-data") pod "6ed8ebe6-4a23-4996-916d-16044c052e9c" (UID: "6ed8ebe6-4a23-4996-916d-16044c052e9c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.353280 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed8ebe6-4a23-4996-916d-16044c052e9c-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.964364 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6ed8ebe6-4a23-4996-916d-16044c052e9c","Type":"ContainerDied","Data":"dde26d8413f434416eb0dc2f4baa347d8efb12e657f6f9325b6a94036e9bc517"} Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.964661 4840 scope.go:117] "RemoveContainer" containerID="b613078e2558a0ebf66d67bd3581db6b7e0a402395f26725098689cc50039eb4" Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.964457 4840 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 17:19:00 crc kubenswrapper[4840]: I1209 17:19:00.997261 4840 scope.go:117] "RemoveContainer" containerID="2a25da6a2339ad2a6b6dc481d3fdb0b75edfeb3920858320efc2c1c006f0ace3"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.027059 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.030678 4840 scope.go:117] "RemoveContainer" containerID="c5ea309a971dfbbfca27ca31e6e2a5f35628e0d75e71e4ec34a2f07a6af82b50"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.037267 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.045997 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: E1209 17:19:01.046549 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-central-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046579 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-central-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: E1209 17:19:01.046603 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-notification-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046611 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-notification-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: E1209 17:19:01.046627 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="proxy-httpd"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046635 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="proxy-httpd"
Dec 09 17:19:01 crc kubenswrapper[4840]: E1209 17:19:01.046674 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="sg-core"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046681 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="sg-core"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046941 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-notification-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.046986 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="sg-core"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.047006 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="ceilometer-central-agent"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.047014 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" containerName="proxy-httpd"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.049435 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.051402 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.051599 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.053135 4840 scope.go:117] "RemoveContainer" containerID="f265a9584e6d81211bd62e00de6fe1a7a1b441d63e814ace8a392ed99824fb45"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.054513 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.173510 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sscp\" (UniqueName: \"kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.173583 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.173997 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.174046 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.174093 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.174412 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.174467 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276423 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276606 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276648 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276693 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sscp\" (UniqueName: \"kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276770 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276828 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.276883 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.277638 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.277686 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.281508 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.281561 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.281860 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.283235 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.295579 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sscp\" (UniqueName: \"kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp\") pod \"ceilometer-0\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.383124 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.750540 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.751118 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-log" containerID="cri-o://626b7bfa78e717405b54e7f386fa77b7330f8d705011bbd94943f37ad0462ba1" gracePeriod=30
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.751203 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-httpd" containerID="cri-o://1433c408e61e693379371b2479e67fb674c78d2bbed7d61f213fe5c0949a86e6" gracePeriod=30
Dec 09 17:19:01 crc kubenswrapper[4840]: W1209 17:19:01.907129 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a8bbd7a_fea2_469d_beac_447fd8b9f308.slice/crio-a6dadae68c510cfbab025a993a3727131a9f9f03651f79171e8a6b8ae78ec7dd WatchSource:0}: Error finding container a6dadae68c510cfbab025a993a3727131a9f9f03651f79171e8a6b8ae78ec7dd: Status 404 returned error can't find the container with id a6dadae68c510cfbab025a993a3727131a9f9f03651f79171e8a6b8ae78ec7dd
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.912161 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.978412 4840 generic.go:334] "Generic (PLEG): container finished" podID="0485b258-b631-4740-a0bb-ae386586c833" containerID="626b7bfa78e717405b54e7f386fa77b7330f8d705011bbd94943f37ad0462ba1" exitCode=143
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.978493 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerDied","Data":"626b7bfa78e717405b54e7f386fa77b7330f8d705011bbd94943f37ad0462ba1"}
Dec 09 17:19:01 crc kubenswrapper[4840]: I1209 17:19:01.980183 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerStarted","Data":"a6dadae68c510cfbab025a993a3727131a9f9f03651f79171e8a6b8ae78ec7dd"}
Dec 09 17:19:02 crc kubenswrapper[4840]: I1209 17:19:02.622929 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ed8ebe6-4a23-4996-916d-16044c052e9c" path="/var/lib/kubelet/pods/6ed8ebe6-4a23-4996-916d-16044c052e9c/volumes"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.560264 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.616158 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-l72ts"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.617644 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.639150 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-l72ts"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.728113 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m66r9\" (UniqueName: \"kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.728285 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.829790 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.829917 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m66r9\" (UniqueName: \"kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.830598 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.841982 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-4brzw"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.843437 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.853742 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m66r9\" (UniqueName: \"kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9\") pod \"nova-api-db-create-l72ts\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.872014 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-88e8-account-create-update-dqtnk"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.873700 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.878430 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.922449 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-4brzw"]
Dec 09 17:19:03 crc kubenswrapper[4840]: I1209 17:19:03.924491 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-88e8-account-create-update-dqtnk"]
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.002031 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerStarted","Data":"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c"}
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.009290 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-l72ts"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035086 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035254 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4tnk\" (UniqueName: \"kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035350 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4vms\" (UniqueName: \"kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035386 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035801 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035837 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.035873 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.036368 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.036423 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01" gracePeriod=600
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.056384 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-t7sm2"]
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.057749 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-t7sm2"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.073995 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-t7sm2"]
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.080166 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e8ef-account-create-update-g9drg"]
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.081555 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.085435 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.099265 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e8ef-account-create-update-g9drg"]
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.137575 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4tnk\" (UniqueName: \"kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.137795 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4vms\" (UniqueName: \"kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.137855 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.137923 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.138858 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.141319 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.169720 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4tnk\" (UniqueName: \"kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk\") pod \"nova-cell0-db-create-4brzw\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " pod="openstack/nova-cell0-db-create-4brzw"
Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.172792 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4vms\" (UniqueName: \"kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms\") pod \"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk"
\"nova-api-88e8-account-create-update-dqtnk\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " pod="openstack/nova-api-88e8-account-create-update-dqtnk" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.240417 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.240513 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgv22\" (UniqueName: \"kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.240592 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.240619 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsvjf\" (UniqueName: \"kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.268717 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-4brzw" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.274858 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-88e8-account-create-update-dqtnk" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.286576 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-f458-account-create-update-mmdhp"] Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.287916 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.293166 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.312247 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f458-account-create-update-mmdhp"] Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.343248 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.343654 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgv22\" (UniqueName: \"kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.343801 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.343893 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsvjf\" (UniqueName: \"kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.345359 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.346308 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.374226 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgv22\" (UniqueName: \"kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22\") pod \"nova-cell0-e8ef-account-create-update-g9drg\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.377352 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsvjf\" (UniqueName: 
\"kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf\") pod \"nova-cell1-db-create-t7sm2\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.446202 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bprs\" (UniqueName: \"kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.446294 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.549020 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bprs\" (UniqueName: \"kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.549100 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.549840 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.563329 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.569754 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bprs\" (UniqueName: \"kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs\") pod \"nova-cell1-f458-account-create-update-mmdhp\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.581143 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-l72ts"] Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.584131 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:04 crc kubenswrapper[4840]: W1209 17:19:04.633354 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e6c81c8_f19e_4262_be49_f7b0f5dc707f.slice/crio-a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed WatchSource:0}: Error finding container a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed: Status 404 returned error can't find the container with id a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.696853 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:04 crc kubenswrapper[4840]: I1209 17:19:04.912011 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-4brzw"] Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.047126 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4brzw" event={"ID":"0b68c48b-8b21-4a23-ad36-d987eeae2757","Type":"ContainerStarted","Data":"31a27189599bca24159e3cc1add729492bd5ce311309c7a3e055535a69f20a75"} Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.053290 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-l72ts" event={"ID":"1e6c81c8-f19e-4262-be49-f7b0f5dc707f","Type":"ContainerStarted","Data":"a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed"} Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.063586 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01" exitCode=0 Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.063624 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01"} Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.063652 4840 scope.go:117] "RemoveContainer" containerID="e12e998619dcfb414f5abc4e5512aafccfda5811abde023bf5ca07762965de9f" Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.083675 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-88e8-account-create-update-dqtnk"] Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.181466 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-t7sm2"] Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.279659 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e8ef-account-create-update-g9drg"] Dec 09 17:19:05 crc kubenswrapper[4840]: I1209 17:19:05.958599 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f458-account-create-update-mmdhp"] Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.149121 4840 generic.go:334] "Generic (PLEG): container finished" podID="1e6c81c8-f19e-4262-be49-f7b0f5dc707f" containerID="fbe04ed4cf5ed7a4021ff4fd97f03393e2f65a064b2a6d987117603817dbac03" exitCode=0 Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.149692 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-l72ts" 
event={"ID":"1e6c81c8-f19e-4262-be49-f7b0f5dc707f","Type":"ContainerDied","Data":"fbe04ed4cf5ed7a4021ff4fd97f03393e2f65a064b2a6d987117603817dbac03"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.196888 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerStarted","Data":"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.201651 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-t7sm2" event={"ID":"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4","Type":"ContainerStarted","Data":"2f5a92a46f2284a8f6a5d84c987a7df97201c78acf4b4214641c42cc35bb5d59"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.201716 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-t7sm2" event={"ID":"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4","Type":"ContainerStarted","Data":"4d2ef05471e93ca80110d28db5354be7cd52a71e26e1ecfa9a35c734ac020f70"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.203386 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-88e8-account-create-update-dqtnk" event={"ID":"1016da39-4885-4421-95bb-07c658b86dfd","Type":"ContainerStarted","Data":"893fae742bdcf8e694bcc4710dabaee3cda4c242488379d6e699d4b21f5f87cf"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.204717 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4brzw" event={"ID":"0b68c48b-8b21-4a23-ad36-d987eeae2757","Type":"ContainerStarted","Data":"81fcdb981828bbad17175a0d13cb25175f65aad82fba157487ecc9166a4378a1"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.211887 4840 generic.go:334] "Generic (PLEG): container finished" podID="0485b258-b631-4740-a0bb-ae386586c833" containerID="1433c408e61e693379371b2479e67fb674c78d2bbed7d61f213fe5c0949a86e6" exitCode=0 Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.211924 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerDied","Data":"1433c408e61e693379371b2479e67fb674c78d2bbed7d61f213fe5c0949a86e6"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.214819 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" event={"ID":"8b92c382-2599-4692-9fc6-557ef858013d","Type":"ContainerStarted","Data":"359a71a62b5c8d99786ab6750785e12f31e10805ac3f4265c5b6c1ad28cad2e4"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.219084 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" event={"ID":"4691d812-4f31-4379-9b25-e1fcafa891ff","Type":"ContainerStarted","Data":"b0988082d6a8a56c7be85960a9e04ad5a851fc88df606024128c0a335697fdc2"} Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.226212 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-4brzw" podStartSLOduration=3.226193028 podStartE2EDuration="3.226193028s" podCreationTimestamp="2025-12-09 17:19:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:06.223357009 +0000 UTC m=+1332.214467642" watchObservedRunningTime="2025-12-09 17:19:06.226193028 +0000 UTC m=+1332.217303661" Dec 09 17:19:06 crc kubenswrapper[4840]: 
I1209 17:19:06.226311 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.402316 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.402686 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.402779 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncfb5\" (UniqueName: \"kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.402921 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.403020 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.403074 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.403117 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.403147 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run\") pod \"0485b258-b631-4740-a0bb-ae386586c833\" (UID: \"0485b258-b631-4740-a0bb-ae386586c833\") " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.404091 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs" (OuterVolumeSpecName: "logs") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.404250 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.407329 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts" (OuterVolumeSpecName: "scripts") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.409257 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5" (OuterVolumeSpecName: "kube-api-access-ncfb5") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "kube-api-access-ncfb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.419665 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56" (OuterVolumeSpecName: "glance") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.431780 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.478051 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.493055 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data" (OuterVolumeSpecName: "config-data") pod "0485b258-b631-4740-a0bb-ae386586c833" (UID: "0485b258-b631-4740-a0bb-ae386586c833"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505394 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") on node \"crc\" " Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505426 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505437 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505462 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505469 4840 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0485b258-b631-4740-a0bb-ae386586c833-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505478 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505489 4840 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0485b258-b631-4740-a0bb-ae386586c833-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.505498 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncfb5\" (UniqueName: \"kubernetes.io/projected/0485b258-b631-4740-a0bb-ae386586c833-kube-api-access-ncfb5\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.530556 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.530723 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56") on node "crc" Dec 09 17:19:06 crc kubenswrapper[4840]: I1209 17:19:06.610938 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.237951 4840 generic.go:334] "Generic (PLEG): container finished" podID="0b68c48b-8b21-4a23-ad36-d987eeae2757" containerID="81fcdb981828bbad17175a0d13cb25175f65aad82fba157487ecc9166a4378a1" exitCode=0 Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.238477 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4brzw" event={"ID":"0b68c48b-8b21-4a23-ad36-d987eeae2757","Type":"ContainerDied","Data":"81fcdb981828bbad17175a0d13cb25175f65aad82fba157487ecc9166a4378a1"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.241768 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" event={"ID":"8b92c382-2599-4692-9fc6-557ef858013d","Type":"ContainerStarted","Data":"001569c1dde279ca7136d97884b55c5dd1883104d3d77cb098824b2940ac3d55"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.251308 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.257605 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" event={"ID":"4691d812-4f31-4379-9b25-e1fcafa891ff","Type":"ContainerStarted","Data":"96ed9e8bfa2fc1a1335d042506e471b3ce52f55ec0a4173d9a7bc047ad58451d"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.267560 4840 generic.go:334] "Generic (PLEG): container finished" podID="f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" containerID="2f5a92a46f2284a8f6a5d84c987a7df97201c78acf4b4214641c42cc35bb5d59" exitCode=0 Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.267660 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-t7sm2" event={"ID":"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4","Type":"ContainerDied","Data":"2f5a92a46f2284a8f6a5d84c987a7df97201c78acf4b4214641c42cc35bb5d59"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.269935 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0485b258-b631-4740-a0bb-ae386586c833","Type":"ContainerDied","Data":"992b04068e781992f79a473904dcf715fc2851599f0f23b6ae46c50e6a21879b"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.270033 4840 scope.go:117] "RemoveContainer" containerID="1433c408e61e693379371b2479e67fb674c78d2bbed7d61f213fe5c0949a86e6" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.270056 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.295224 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-88e8-account-create-update-dqtnk" event={"ID":"1016da39-4885-4421-95bb-07c658b86dfd","Type":"ContainerStarted","Data":"5a7c908211ae13f1ea15dda93641fd37f1e30ed5ab6bf1efa0260a58d67dc107"} Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.306342 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" podStartSLOduration=3.306321411 podStartE2EDuration="3.306321411s" podCreationTimestamp="2025-12-09 17:19:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:07.30094213 +0000 UTC m=+1333.292052753" watchObservedRunningTime="2025-12-09 17:19:07.306321411 +0000 UTC m=+1333.297432044" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.317374 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" podStartSLOduration=3.317355291 podStartE2EDuration="3.317355291s" podCreationTimestamp="2025-12-09 17:19:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:07.316562329 +0000 UTC m=+1333.307672962" watchObservedRunningTime="2025-12-09 17:19:07.317355291 +0000 UTC m=+1333.308465924" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.375384 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.379893 4840 scope.go:117] "RemoveContainer" containerID="626b7bfa78e717405b54e7f386fa77b7330f8d705011bbd94943f37ad0462ba1" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.389696 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.394531 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-88e8-account-create-update-dqtnk" podStartSLOduration=4.394507865 podStartE2EDuration="4.394507865s" podCreationTimestamp="2025-12-09 17:19:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:07.344351218 +0000 UTC m=+1333.335461841" watchObservedRunningTime="2025-12-09 17:19:07.394507865 +0000 UTC m=+1333.385618498" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.439546 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:19:07 crc kubenswrapper[4840]: E1209 17:19:07.440612 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-log" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.440630 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-log" Dec 09 17:19:07 crc kubenswrapper[4840]: E1209 17:19:07.440639 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-httpd" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.440646 4840 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-httpd" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.440911 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-httpd" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.440936 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0485b258-b631-4740-a0bb-ae386586c833" containerName="glance-log" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.442577 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.450270 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.450545 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.489714 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534257 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hw8z\" (UniqueName: \"kubernetes.io/projected/6f9babec-3c0f-47f7-bb1a-e898e153374e-kube-api-access-4hw8z\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534309 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534361 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534400 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534432 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-logs\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534467 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-public-tls-certs\") pod 
\"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534484 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.534512 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636514 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636608 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-logs\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636665 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636692 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636733 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636808 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hw8z\" (UniqueName: \"kubernetes.io/projected/6f9babec-3c0f-47f7-bb1a-e898e153374e-kube-api-access-4hw8z\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636849 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.636914 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.638176 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.641405 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f9babec-3c0f-47f7-bb1a-e898e153374e-logs\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.644690 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.647938 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.648633 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.648669 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/38cd25422c2a393197855a396291af132e300524db65f2672b792068080a1237/globalmount\"" pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.662871 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.663120 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f9babec-3c0f-47f7-bb1a-e898e153374e-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.670636 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hw8z\" (UniqueName: \"kubernetes.io/projected/6f9babec-3c0f-47f7-bb1a-e898e153374e-kube-api-access-4hw8z\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.773336 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5d63698a-e0f6-4f6f-b72a-b15b13bd8d56\") pod \"glance-default-external-api-0\" (UID: \"6f9babec-3c0f-47f7-bb1a-e898e153374e\") " pod="openstack/glance-default-external-api-0" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.797128 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-l72ts" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.942846 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m66r9\" (UniqueName: \"kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9\") pod \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.943068 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts\") pod \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\" (UID: \"1e6c81c8-f19e-4262-be49-f7b0f5dc707f\") " Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.943642 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1e6c81c8-f19e-4262-be49-f7b0f5dc707f" (UID: "1e6c81c8-f19e-4262-be49-f7b0f5dc707f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:07 crc kubenswrapper[4840]: I1209 17:19:07.980365 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9" (OuterVolumeSpecName: "kube-api-access-m66r9") pod "1e6c81c8-f19e-4262-be49-f7b0f5dc707f" (UID: "1e6c81c8-f19e-4262-be49-f7b0f5dc707f"). InnerVolumeSpecName "kube-api-access-m66r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.045920 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m66r9\" (UniqueName: \"kubernetes.io/projected/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-kube-api-access-m66r9\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.045958 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e6c81c8-f19e-4262-be49-f7b0f5dc707f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.075381 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.312289 4840 generic.go:334] "Generic (PLEG): container finished" podID="8b92c382-2599-4692-9fc6-557ef858013d" containerID="001569c1dde279ca7136d97884b55c5dd1883104d3d77cb098824b2940ac3d55" exitCode=0 Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.312547 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" event={"ID":"8b92c382-2599-4692-9fc6-557ef858013d","Type":"ContainerDied","Data":"001569c1dde279ca7136d97884b55c5dd1883104d3d77cb098824b2940ac3d55"} Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.314507 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerStarted","Data":"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca"} Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.317197 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-l72ts" event={"ID":"1e6c81c8-f19e-4262-be49-f7b0f5dc707f","Type":"ContainerDied","Data":"a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed"} Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.317238 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8ecb4ce3ed14369dd8a91c7e7ef7594d261f4994e52d9f768b175c1e35324ed" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.317409 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-l72ts" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.626852 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0485b258-b631-4740-a0bb-ae386586c833" path="/var/lib/kubelet/pods/0485b258-b631-4740-a0bb-ae386586c833/volumes" Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.803366 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 17:19:08 crc kubenswrapper[4840]: I1209 17:19:08.971765 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.007134 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts\") pod \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.007322 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsvjf\" (UniqueName: \"kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf\") pod \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\" (UID: \"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.007855 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" (UID: "f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.017874 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf" (OuterVolumeSpecName: "kube-api-access-hsvjf") pod "f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" (UID: "f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4"). InnerVolumeSpecName "kube-api-access-hsvjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.110057 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.110091 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsvjf\" (UniqueName: \"kubernetes.io/projected/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4-kube-api-access-hsvjf\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.171806 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-4brzw" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.211729 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4tnk\" (UniqueName: \"kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk\") pod \"0b68c48b-8b21-4a23-ad36-d987eeae2757\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.211810 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts\") pod \"0b68c48b-8b21-4a23-ad36-d987eeae2757\" (UID: \"0b68c48b-8b21-4a23-ad36-d987eeae2757\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.212737 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0b68c48b-8b21-4a23-ad36-d987eeae2757" (UID: "0b68c48b-8b21-4a23-ad36-d987eeae2757"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.215849 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk" (OuterVolumeSpecName: "kube-api-access-m4tnk") pod "0b68c48b-8b21-4a23-ad36-d987eeae2757" (UID: "0b68c48b-8b21-4a23-ad36-d987eeae2757"). InnerVolumeSpecName "kube-api-access-m4tnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.314720 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4tnk\" (UniqueName: \"kubernetes.io/projected/0b68c48b-8b21-4a23-ad36-d987eeae2757-kube-api-access-m4tnk\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.314756 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0b68c48b-8b21-4a23-ad36-d987eeae2757-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.336253 4840 generic.go:334] "Generic (PLEG): container finished" podID="4691d812-4f31-4379-9b25-e1fcafa891ff" containerID="96ed9e8bfa2fc1a1335d042506e471b3ce52f55ec0a4173d9a7bc047ad58451d" exitCode=0 Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.336340 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" event={"ID":"4691d812-4f31-4379-9b25-e1fcafa891ff","Type":"ContainerDied","Data":"96ed9e8bfa2fc1a1335d042506e471b3ce52f55ec0a4173d9a7bc047ad58451d"} Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.343412 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f9babec-3c0f-47f7-bb1a-e898e153374e","Type":"ContainerStarted","Data":"43c1fdaa04d4970a14c7d6d86165404bae6640505026e4044666f45a5f28fdca"} Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.347679 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-t7sm2" event={"ID":"f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4","Type":"ContainerDied","Data":"4d2ef05471e93ca80110d28db5354be7cd52a71e26e1ecfa9a35c734ac020f70"} Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.347723 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d2ef05471e93ca80110d28db5354be7cd52a71e26e1ecfa9a35c734ac020f70" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.347805 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-t7sm2" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.351336 4840 generic.go:334] "Generic (PLEG): container finished" podID="1016da39-4885-4421-95bb-07c658b86dfd" containerID="5a7c908211ae13f1ea15dda93641fd37f1e30ed5ab6bf1efa0260a58d67dc107" exitCode=0 Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.351410 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-88e8-account-create-update-dqtnk" event={"ID":"1016da39-4885-4421-95bb-07c658b86dfd","Type":"ContainerDied","Data":"5a7c908211ae13f1ea15dda93641fd37f1e30ed5ab6bf1efa0260a58d67dc107"} Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.354924 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-4brzw" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.355348 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4brzw" event={"ID":"0b68c48b-8b21-4a23-ad36-d987eeae2757","Type":"ContainerDied","Data":"31a27189599bca24159e3cc1add729492bd5ce311309c7a3e055535a69f20a75"} Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.355398 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31a27189599bca24159e3cc1add729492bd5ce311309c7a3e055535a69f20a75" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.760022 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.925869 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bprs\" (UniqueName: \"kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs\") pod \"8b92c382-2599-4692-9fc6-557ef858013d\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.926173 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts\") pod \"8b92c382-2599-4692-9fc6-557ef858013d\" (UID: \"8b92c382-2599-4692-9fc6-557ef858013d\") " Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.927127 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8b92c382-2599-4692-9fc6-557ef858013d" (UID: "8b92c382-2599-4692-9fc6-557ef858013d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:09 crc kubenswrapper[4840]: I1209 17:19:09.931107 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs" (OuterVolumeSpecName: "kube-api-access-7bprs") pod "8b92c382-2599-4692-9fc6-557ef858013d" (UID: "8b92c382-2599-4692-9fc6-557ef858013d"). InnerVolumeSpecName "kube-api-access-7bprs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.028630 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bprs\" (UniqueName: \"kubernetes.io/projected/8b92c382-2599-4692-9fc6-557ef858013d-kube-api-access-7bprs\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.028953 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b92c382-2599-4692-9fc6-557ef858013d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.380246 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.381442 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f458-account-create-update-mmdhp" event={"ID":"8b92c382-2599-4692-9fc6-557ef858013d","Type":"ContainerDied","Data":"359a71a62b5c8d99786ab6750785e12f31e10805ac3f4265c5b6c1ad28cad2e4"} Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.381466 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="359a71a62b5c8d99786ab6750785e12f31e10805ac3f4265c5b6c1ad28cad2e4" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.411697 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerStarted","Data":"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d"} Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.411909 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-central-agent" containerID="cri-o://67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c" gracePeriod=30 Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.412231 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.412330 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="proxy-httpd" containerID="cri-o://ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d" gracePeriod=30 Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.412432 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="sg-core" containerID="cri-o://d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca" gracePeriod=30 Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.412497 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-notification-agent" containerID="cri-o://b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de" gracePeriod=30 Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.418232 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f9babec-3c0f-47f7-bb1a-e898e153374e","Type":"ContainerStarted","Data":"019b9e410d364a42be6ac3b3718e6373178963d580dcbbc42522a021f15d1692"} Dec 09 17:19:10 crc kubenswrapper[4840]: I1209 17:19:10.445481 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.495737561 podStartE2EDuration="10.445462789s" podCreationTimestamp="2025-12-09 17:19:00 +0000 UTC" firstStartedPulling="2025-12-09 17:19:01.909762682 +0000 UTC m=+1327.900873305" lastFinishedPulling="2025-12-09 17:19:09.8594879 +0000 UTC m=+1335.850598533" observedRunningTime="2025-12-09 17:19:10.443071822 +0000 UTC m=+1336.434182465" watchObservedRunningTime="2025-12-09 17:19:10.445462789 +0000 UTC m=+1336.436573422" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.402478 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-88e8-account-create-update-dqtnk" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.466449 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-88e8-account-create-update-dqtnk" event={"ID":"1016da39-4885-4421-95bb-07c658b86dfd","Type":"ContainerDied","Data":"893fae742bdcf8e694bcc4710dabaee3cda4c242488379d6e699d4b21f5f87cf"} Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.466490 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="893fae742bdcf8e694bcc4710dabaee3cda4c242488379d6e699d4b21f5f87cf" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.466544 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-88e8-account-create-update-dqtnk" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.478781 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts\") pod \"1016da39-4885-4421-95bb-07c658b86dfd\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.478850 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4vms\" (UniqueName: \"kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms\") pod \"1016da39-4885-4421-95bb-07c658b86dfd\" (UID: \"1016da39-4885-4421-95bb-07c658b86dfd\") " Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.480556 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1016da39-4885-4421-95bb-07c658b86dfd" (UID: "1016da39-4885-4421-95bb-07c658b86dfd"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499663 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerID="ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d" exitCode=0 Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499694 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerID="d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca" exitCode=2 Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499702 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerID="b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de" exitCode=0 Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499756 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerDied","Data":"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d"} Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499780 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerDied","Data":"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca"} Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.499789 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerDied","Data":"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de"} Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.546235 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms" (OuterVolumeSpecName: "kube-api-access-x4vms") pod "1016da39-4885-4421-95bb-07c658b86dfd" (UID: "1016da39-4885-4421-95bb-07c658b86dfd"). InnerVolumeSpecName "kube-api-access-x4vms". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.557401 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f9babec-3c0f-47f7-bb1a-e898e153374e","Type":"ContainerStarted","Data":"668c49596206cc364327c9d76433266a0c96cf424c677c2438aa82e9555c8ed2"} Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.582539 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1016da39-4885-4421-95bb-07c658b86dfd-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.582568 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4vms\" (UniqueName: \"kubernetes.io/projected/1016da39-4885-4421-95bb-07c658b86dfd-kube-api-access-x4vms\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:11 crc kubenswrapper[4840]: I1209 17:19:11.603337 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.603321683 podStartE2EDuration="4.603321683s" podCreationTimestamp="2025-12-09 17:19:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:11.602706826 +0000 UTC m=+1337.593817459" watchObservedRunningTime="2025-12-09 17:19:11.603321683 +0000 UTC m=+1337.594432306" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.363228 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.525547 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts\") pod \"4691d812-4f31-4379-9b25-e1fcafa891ff\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.525929 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgv22\" (UniqueName: \"kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22\") pod \"4691d812-4f31-4379-9b25-e1fcafa891ff\" (UID: \"4691d812-4f31-4379-9b25-e1fcafa891ff\") " Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.526105 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4691d812-4f31-4379-9b25-e1fcafa891ff" (UID: "4691d812-4f31-4379-9b25-e1fcafa891ff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.526400 4840 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4691d812-4f31-4379-9b25-e1fcafa891ff-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.531029 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22" (OuterVolumeSpecName: "kube-api-access-mgv22") pod "4691d812-4f31-4379-9b25-e1fcafa891ff" (UID: "4691d812-4f31-4379-9b25-e1fcafa891ff"). InnerVolumeSpecName "kube-api-access-mgv22". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.582076 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" event={"ID":"4691d812-4f31-4379-9b25-e1fcafa891ff","Type":"ContainerDied","Data":"b0988082d6a8a56c7be85960a9e04ad5a851fc88df606024128c0a335697fdc2"} Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.582127 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0988082d6a8a56c7be85960a9e04ad5a851fc88df606024128c0a335697fdc2" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.582132 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e8ef-account-create-update-g9drg" Dec 09 17:19:13 crc kubenswrapper[4840]: I1209 17:19:13.628826 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgv22\" (UniqueName: \"kubernetes.io/projected/4691d812-4f31-4379-9b25-e1fcafa891ff-kube-api-access-mgv22\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:15 crc kubenswrapper[4840]: I1209 17:19:15.922154 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cloudkitty-api-0" podUID="35c9d492-0c0b-4d85-9235-e7ede2df5752" containerName="cloudkitty-api" probeResult="failure" output="Get \"https://10.217.0.188:8889/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:19:15 crc kubenswrapper[4840]: I1209 17:19:15.922155 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-api-0" podUID="35c9d492-0c0b-4d85-9235-e7ede2df5752" containerName="cloudkitty-api" probeResult="failure" output="Get \"https://10.217.0.188:8889/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:19:16 crc kubenswrapper[4840]: I1209 17:19:16.911387 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:16 crc kubenswrapper[4840]: I1209 17:19:16.911652 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-log" containerID="cri-o://67e53271296fc29a725fea28590583942bc9bfa4001eb05667e60db577c26c08" gracePeriod=30 Dec 09 17:19:16 crc kubenswrapper[4840]: I1209 17:19:16.911809 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-httpd" containerID="cri-o://cd11d777ab731590b573139b2ee8a9010d67ab18f1910043d9dda367457ab892" gracePeriod=30 Dec 09 17:19:17 crc kubenswrapper[4840]: I1209 17:19:17.628666 4840 generic.go:334] "Generic (PLEG): container finished" podID="959b2112-8599-463a-8f23-913766ab1b4e" containerID="67e53271296fc29a725fea28590583942bc9bfa4001eb05667e60db577c26c08" exitCode=143 Dec 09 17:19:17 crc kubenswrapper[4840]: I1209 17:19:17.628874 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerDied","Data":"67e53271296fc29a725fea28590583942bc9bfa4001eb05667e60db577c26c08"} Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.029666 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.081683 4840 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.081735 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.138642 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.148580 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.638815 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 17:19:18 crc kubenswrapper[4840]: I1209 17:19:18.639101 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430441 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bwmsc"] Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430836 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b92c382-2599-4692-9fc6-557ef858013d" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430859 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b92c382-2599-4692-9fc6-557ef858013d" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430884 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b68c48b-8b21-4a23-ad36-d987eeae2757" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430892 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b68c48b-8b21-4a23-ad36-d987eeae2757" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430907 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4691d812-4f31-4379-9b25-e1fcafa891ff" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430914 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4691d812-4f31-4379-9b25-e1fcafa891ff" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430941 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430947 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430958 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1016da39-4885-4421-95bb-07c658b86dfd" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.430979 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1016da39-4885-4421-95bb-07c658b86dfd" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: E1209 17:19:19.430993 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e6c81c8-f19e-4262-be49-f7b0f5dc707f" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 
17:19:19.430999 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e6c81c8-f19e-4262-be49-f7b0f5dc707f" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431176 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1016da39-4885-4421-95bb-07c658b86dfd" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431189 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b68c48b-8b21-4a23-ad36-d987eeae2757" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431199 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4691d812-4f31-4379-9b25-e1fcafa891ff" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431209 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431225 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e6c81c8-f19e-4262-be49-f7b0f5dc707f" containerName="mariadb-database-create" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431241 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b92c382-2599-4692-9fc6-557ef858013d" containerName="mariadb-account-create-update" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.431908 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.433875 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-g7rp4" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.434089 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.435654 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.444026 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bwmsc"] Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.551372 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.551701 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.551753 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvlk6\" (UniqueName: \"kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " 
pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.551812 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.653290 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.654237 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.654272 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvlk6\" (UniqueName: \"kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.654317 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.662710 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.666692 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.667513 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.680302 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvlk6\" (UniqueName: \"kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6\") pod \"nova-cell0-conductor-db-sync-bwmsc\" (UID: 
\"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:19 crc kubenswrapper[4840]: I1209 17:19:19.758271 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.290374 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bwmsc"] Dec 09 17:19:20 crc kubenswrapper[4840]: W1209 17:19:20.294635 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9eaf9500_68c1_4f32_a00e_693c221c4cb0.slice/crio-03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465 WatchSource:0}: Error finding container 03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465: Status 404 returned error can't find the container with id 03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465 Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.669278 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" event={"ID":"9eaf9500-68c1-4f32-a00e-693c221c4cb0","Type":"ContainerStarted","Data":"03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465"} Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.678702 4840 generic.go:334] "Generic (PLEG): container finished" podID="959b2112-8599-463a-8f23-913766ab1b4e" containerID="cd11d777ab731590b573139b2ee8a9010d67ab18f1910043d9dda367457ab892" exitCode=0 Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.678786 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.678797 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.679649 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerDied","Data":"cd11d777ab731590b573139b2ee8a9010d67ab18f1910043d9dda367457ab892"} Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.863203 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988039 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988138 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87m5l\" (UniqueName: \"kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988388 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988443 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988545 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988580 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988618 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988635 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs\") pod \"959b2112-8599-463a-8f23-913766ab1b4e\" (UID: \"959b2112-8599-463a-8f23-913766ab1b4e\") " Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.988642 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.989105 4840 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:20 crc kubenswrapper[4840]: I1209 17:19:20.989419 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs" (OuterVolumeSpecName: "logs") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.002076 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts" (OuterVolumeSpecName: "scripts") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.018186 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l" (OuterVolumeSpecName: "kube-api-access-87m5l") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "kube-api-access-87m5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.028816 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.081367 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.085511 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc" (OuterVolumeSpecName: "glance") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "pvc-f0e7e026-17da-49b6-acf8-808958fa33bc". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090795 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87m5l\" (UniqueName: \"kubernetes.io/projected/959b2112-8599-463a-8f23-913766ab1b4e-kube-api-access-87m5l\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090864 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") on node \"crc\" " Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090882 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090894 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090904 4840 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.090918 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/959b2112-8599-463a-8f23-913766ab1b4e-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.121103 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.121259 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f0e7e026-17da-49b6-acf8-808958fa33bc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc") on node "crc" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.129275 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data" (OuterVolumeSpecName: "config-data") pod "959b2112-8599-463a-8f23-913766ab1b4e" (UID: "959b2112-8599-463a-8f23-913766ab1b4e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.192556 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/959b2112-8599-463a-8f23-913766ab1b4e-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.192590 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.302612 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.379301 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.715216 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.715201 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"959b2112-8599-463a-8f23-913766ab1b4e","Type":"ContainerDied","Data":"3149cc9e5d10bc6cb9820f1a3bf4ebf8f7dd2972dae4b61b214c9f38621967bc"} Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.715708 4840 scope.go:117] "RemoveContainer" containerID="cd11d777ab731590b573139b2ee8a9010d67ab18f1910043d9dda367457ab892" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.758326 4840 scope.go:117] "RemoveContainer" containerID="67e53271296fc29a725fea28590583942bc9bfa4001eb05667e60db577c26c08" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.763800 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.785024 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.815630 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:21 crc kubenswrapper[4840]: E1209 17:19:21.816165 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-log" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.816182 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-log" Dec 09 17:19:21 crc kubenswrapper[4840]: E1209 17:19:21.816194 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-httpd" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.816200 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-httpd" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.816396 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-httpd" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.816425 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="959b2112-8599-463a-8f23-913766ab1b4e" containerName="glance-log" Dec 09 17:19:21 crc 
kubenswrapper[4840]: I1209 17:19:21.817477 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.822020 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.822212 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.826286 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907457 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907545 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907579 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907665 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907700 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907749 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907852 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6srwd\" (UniqueName: \"kubernetes.io/projected/0ae83718-1999-4231-89c7-aac5ea4f930a-kube-api-access-6srwd\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " 
pod="openstack/glance-default-internal-api-0" Dec 09 17:19:21 crc kubenswrapper[4840]: I1209 17:19:21.907954 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-logs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009298 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009358 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009402 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009457 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6srwd\" (UniqueName: \"kubernetes.io/projected/0ae83718-1999-4231-89c7-aac5ea4f930a-kube-api-access-6srwd\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009513 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-logs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009600 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009642 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.009668 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " 
pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.010237 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.010271 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ae83718-1999-4231-89c7-aac5ea4f930a-logs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.013724 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.013758 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e28d7941ce676756577bb740fab8e92889d07f3d1c4bbddbbdbd9c7d965e46e3/globalmount\"" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.016351 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.021125 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.021312 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.021425 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae83718-1999-4231-89c7-aac5ea4f930a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.032706 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6srwd\" (UniqueName: \"kubernetes.io/projected/0ae83718-1999-4231-89c7-aac5ea4f930a-kube-api-access-6srwd\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 
17:19:22.095534 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0e7e026-17da-49b6-acf8-808958fa33bc\") pod \"glance-default-internal-api-0\" (UID: \"0ae83718-1999-4231-89c7-aac5ea4f930a\") " pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.138319 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.420025 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.526916 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527089 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527138 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527165 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527340 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527437 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.527478 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sscp\" (UniqueName: \"kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp\") pod \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\" (UID: \"7a8bbd7a-fea2-469d-beac-447fd8b9f308\") " Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.528713 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.529380 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.531332 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp" (OuterVolumeSpecName: "kube-api-access-7sscp") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "kube-api-access-7sscp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.534340 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts" (OuterVolumeSpecName: "scripts") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.560121 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.622078 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.624327 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="959b2112-8599-463a-8f23-913766ab1b4e" path="/var/lib/kubelet/pods/959b2112-8599-463a-8f23-913766ab1b4e/volumes" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629414 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629446 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629460 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sscp\" (UniqueName: \"kubernetes.io/projected/7a8bbd7a-fea2-469d-beac-447fd8b9f308-kube-api-access-7sscp\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629470 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629480 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.629489 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a8bbd7a-fea2-469d-beac-447fd8b9f308-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.649913 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data" (OuterVolumeSpecName: "config-data") pod "7a8bbd7a-fea2-469d-beac-447fd8b9f308" (UID: "7a8bbd7a-fea2-469d-beac-447fd8b9f308"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.730954 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a8bbd7a-fea2-469d-beac-447fd8b9f308-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.732535 4840 generic.go:334] "Generic (PLEG): container finished" podID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerID="67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c" exitCode=0 Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.733262 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.733807 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerDied","Data":"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c"} Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.733849 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a8bbd7a-fea2-469d-beac-447fd8b9f308","Type":"ContainerDied","Data":"a6dadae68c510cfbab025a993a3727131a9f9f03651f79171e8a6b8ae78ec7dd"} Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.733871 4840 scope.go:117] "RemoveContainer" containerID="ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.763707 4840 scope.go:117] "RemoveContainer" containerID="d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.770606 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.790316 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.819514 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.842391 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.842937 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="sg-core" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.842973 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="sg-core" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.842989 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-central-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.842996 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-central-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.843006 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="proxy-httpd" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843015 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="proxy-httpd" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.843052 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-notification-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843057 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-notification-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843246 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-central-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843277 4840 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="sg-core" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843291 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="proxy-httpd" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.843305 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" containerName="ceilometer-notification-agent" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.845353 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.848119 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.850384 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.868469 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.882837 4840 scope.go:117] "RemoveContainer" containerID="b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.906743 4840 scope.go:117] "RemoveContainer" containerID="67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.980391 4840 scope.go:117] "RemoveContainer" containerID="ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.980837 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d\": container with ID starting with ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d not found: ID does not exist" containerID="ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.980868 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d"} err="failed to get container status \"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d\": rpc error: code = NotFound desc = could not find container \"ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d\": container with ID starting with ac63af0b95579f08cb96270480058abbe9dbf4dba92a777e0f09a422564cc93d not found: ID does not exist" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.980894 4840 scope.go:117] "RemoveContainer" containerID="d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.981599 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca\": container with ID starting with d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca not found: ID does not exist" containerID="d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.981626 4840 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca"} err="failed to get container status \"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca\": rpc error: code = NotFound desc = could not find container \"d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca\": container with ID starting with d0651e93c31000600936c4d20fcc55f841b568a9c7e79764c400f2d6e67266ca not found: ID does not exist" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.981645 4840 scope.go:117] "RemoveContainer" containerID="b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.982011 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de\": container with ID starting with b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de not found: ID does not exist" containerID="b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.982032 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de"} err="failed to get container status \"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de\": rpc error: code = NotFound desc = could not find container \"b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de\": container with ID starting with b77cda020086d973d81b2104c61b2c838026948ec20b2b4fcb8e59628aea91de not found: ID does not exist" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.982054 4840 scope.go:117] "RemoveContainer" containerID="67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c" Dec 09 17:19:22 crc kubenswrapper[4840]: E1209 17:19:22.982626 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c\": container with ID starting with 67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c not found: ID does not exist" containerID="67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c" Dec 09 17:19:22 crc kubenswrapper[4840]: I1209 17:19:22.982653 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c"} err="failed to get container status \"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c\": rpc error: code = NotFound desc = could not find container \"67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c\": container with ID starting with 67783824981d697c2930fb683bef8387224ee388c4bd103502b85e202934418c not found: ID does not exist" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.039735 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.039806 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.040131 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.040277 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.040305 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.040379 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9224c\" (UniqueName: \"kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.040403 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142198 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142297 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142319 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142383 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9224c\" (UniqueName: \"kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142420 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142487 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142537 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142700 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.142949 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.148892 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.151154 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.152711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.153640 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.161306 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9224c\" (UniqueName: \"kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c\") pod \"ceilometer-0\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.178518 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.758391 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.796482 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0ae83718-1999-4231-89c7-aac5ea4f930a","Type":"ContainerStarted","Data":"0c52d26596aad726fb798369a46fb91c3704d8c3f707b83716e94f10ce9ffa9b"} Dec 09 17:19:23 crc kubenswrapper[4840]: I1209 17:19:23.796527 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0ae83718-1999-4231-89c7-aac5ea4f930a","Type":"ContainerStarted","Data":"a3e31c39bbd0748772a2c242a21f219bdb04dcd750ba8fb84114925ad8c33b0f"} Dec 09 17:19:24 crc kubenswrapper[4840]: I1209 17:19:24.627841 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a8bbd7a-fea2-469d-beac-447fd8b9f308" path="/var/lib/kubelet/pods/7a8bbd7a-fea2-469d-beac-447fd8b9f308/volumes" Dec 09 17:19:24 crc kubenswrapper[4840]: I1209 17:19:24.809562 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0ae83718-1999-4231-89c7-aac5ea4f930a","Type":"ContainerStarted","Data":"95b38856761ee52d33711804c7c73bec78681acb5de1048efab5010c1349190a"} Dec 09 17:19:24 crc kubenswrapper[4840]: I1209 17:19:24.812409 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerStarted","Data":"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260"} Dec 09 17:19:24 crc kubenswrapper[4840]: I1209 17:19:24.812459 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerStarted","Data":"4cdf5893e76f62c3bbefd3ccef4d56140486731d8b596dfa16b36455176dcb31"} Dec 09 17:19:24 crc kubenswrapper[4840]: I1209 17:19:24.843137 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.843116815 podStartE2EDuration="3.843116815s" podCreationTimestamp="2025-12-09 17:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:24.827518377 +0000 UTC m=+1350.818629030" watchObservedRunningTime="2025-12-09 17:19:24.843116815 +0000 UTC m=+1350.834227458" Dec 09 17:19:30 crc kubenswrapper[4840]: I1209 17:19:30.882938 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" event={"ID":"9eaf9500-68c1-4f32-a00e-693c221c4cb0","Type":"ContainerStarted","Data":"f856c0fb1f20af820ef9dfd98d73ce354e134758809cda0c796cd54cdd1f7705"} Dec 09 17:19:30 crc kubenswrapper[4840]: I1209 17:19:30.887120 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerStarted","Data":"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370"} Dec 09 17:19:31 crc kubenswrapper[4840]: I1209 17:19:31.900737 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerStarted","Data":"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755"} Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.139412 
4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.139485 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.194286 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.198865 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.220404 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" podStartSLOduration=2.972828111 podStartE2EDuration="13.220387635s" podCreationTimestamp="2025-12-09 17:19:19 +0000 UTC" firstStartedPulling="2025-12-09 17:19:20.301287404 +0000 UTC m=+1346.292398037" lastFinishedPulling="2025-12-09 17:19:30.548846928 +0000 UTC m=+1356.539957561" observedRunningTime="2025-12-09 17:19:30.898924479 +0000 UTC m=+1356.890035112" watchObservedRunningTime="2025-12-09 17:19:32.220387635 +0000 UTC m=+1358.211498268" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.909397 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:32 crc kubenswrapper[4840]: I1209 17:19:32.909444 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:35 crc kubenswrapper[4840]: I1209 17:19:35.943222 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerStarted","Data":"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238"} Dec 09 17:19:35 crc kubenswrapper[4840]: I1209 17:19:35.943644 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:19:35 crc kubenswrapper[4840]: I1209 17:19:35.964282 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.686943194 podStartE2EDuration="13.964262195s" podCreationTimestamp="2025-12-09 17:19:22 +0000 UTC" firstStartedPulling="2025-12-09 17:19:23.791410549 +0000 UTC m=+1349.782521182" lastFinishedPulling="2025-12-09 17:19:33.06872955 +0000 UTC m=+1359.059840183" observedRunningTime="2025-12-09 17:19:35.964023598 +0000 UTC m=+1361.955134241" watchObservedRunningTime="2025-12-09 17:19:35.964262195 +0000 UTC m=+1361.955372828" Dec 09 17:19:37 crc kubenswrapper[4840]: I1209 17:19:37.256571 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:37 crc kubenswrapper[4840]: I1209 17:19:37.257129 4840 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 17:19:37 crc kubenswrapper[4840]: I1209 17:19:37.265712 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 17:19:41 crc kubenswrapper[4840]: I1209 17:19:41.768780 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:41 crc kubenswrapper[4840]: I1209 17:19:41.769600 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-central-agent" containerID="cri-o://887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260" gracePeriod=30 Dec 09 17:19:41 crc kubenswrapper[4840]: I1209 17:19:41.769672 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="sg-core" containerID="cri-o://7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755" gracePeriod=30 Dec 09 17:19:41 crc kubenswrapper[4840]: I1209 17:19:41.769727 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-notification-agent" containerID="cri-o://4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370" gracePeriod=30 Dec 09 17:19:41 crc kubenswrapper[4840]: I1209 17:19:41.769807 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="proxy-httpd" containerID="cri-o://510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238" gracePeriod=30 Dec 09 17:19:42 crc kubenswrapper[4840]: I1209 17:19:42.012110 4840 generic.go:334] "Generic (PLEG): container finished" podID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerID="510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238" exitCode=0 Dec 09 17:19:42 crc kubenswrapper[4840]: I1209 17:19:42.012345 4840 generic.go:334] "Generic (PLEG): container finished" podID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerID="7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755" exitCode=2 Dec 09 17:19:42 crc kubenswrapper[4840]: I1209 17:19:42.012187 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerDied","Data":"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238"} Dec 09 17:19:42 crc kubenswrapper[4840]: I1209 17:19:42.012382 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerDied","Data":"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755"} Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.027497 4840 generic.go:334] "Generic (PLEG): container finished" podID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerID="887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260" exitCode=0 Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.027568 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerDied","Data":"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260"} Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.853352 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.906471 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9224c\" (UniqueName: \"kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.906524 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.906578 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.906616 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.906638 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.907140 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.912361 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts" (OuterVolumeSpecName: "scripts") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.915227 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c" (OuterVolumeSpecName: "kube-api-access-9224c") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "kube-api-access-9224c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:43 crc kubenswrapper[4840]: I1209 17:19:43.935191 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.004271 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data" (OuterVolumeSpecName: "config-data") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.008588 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.008680 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle\") pod \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\" (UID: \"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead\") " Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009104 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009486 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009518 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9224c\" (UniqueName: \"kubernetes.io/projected/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-kube-api-access-9224c\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009541 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009560 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009579 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.009596 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.040534 4840 generic.go:334] "Generic (PLEG): container finished" podID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerID="4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370" exitCode=0 Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.040578 4840 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerDied","Data":"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370"} Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.040605 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fc4a13b-cbf7-4ecc-86bc-7ef530967ead","Type":"ContainerDied","Data":"4cdf5893e76f62c3bbefd3ccef4d56140486731d8b596dfa16b36455176dcb31"} Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.040622 4840 scope.go:117] "RemoveContainer" containerID="510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.040749 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.074629 4840 scope.go:117] "RemoveContainer" containerID="7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.093455 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" (UID: "6fc4a13b-cbf7-4ecc-86bc-7ef530967ead"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.097072 4840 scope.go:117] "RemoveContainer" containerID="4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.112215 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.116209 4840 scope.go:117] "RemoveContainer" containerID="887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.138812 4840 scope.go:117] "RemoveContainer" containerID="510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.139231 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238\": container with ID starting with 510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238 not found: ID does not exist" containerID="510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.139277 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238"} err="failed to get container status \"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238\": rpc error: code = NotFound desc = could not find container \"510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238\": container with ID starting with 510c8e45f555bf9dfcd00dd29b25c9a3f2efa81eb099f1b75814da747f13a238 not found: ID does not exist" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.139307 4840 scope.go:117] "RemoveContainer" containerID="7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755" Dec 09 17:19:44 crc kubenswrapper[4840]: 
E1209 17:19:44.139913 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755\": container with ID starting with 7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755 not found: ID does not exist" containerID="7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.139986 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755"} err="failed to get container status \"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755\": rpc error: code = NotFound desc = could not find container \"7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755\": container with ID starting with 7edbdc2b0042a8072f4e2c4bb893888f5b0740a7554851f26884d23aa0eed755 not found: ID does not exist" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.140017 4840 scope.go:117] "RemoveContainer" containerID="4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.141801 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370\": container with ID starting with 4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370 not found: ID does not exist" containerID="4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.141823 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370"} err="failed to get container status \"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370\": rpc error: code = NotFound desc = could not find container \"4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370\": container with ID starting with 4c997c1b58e11d6c3085e3c0547708bfea7868003850348c4f162f22f953f370 not found: ID does not exist" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.141860 4840 scope.go:117] "RemoveContainer" containerID="887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.142192 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260\": container with ID starting with 887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260 not found: ID does not exist" containerID="887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.142229 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260"} err="failed to get container status \"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260\": rpc error: code = NotFound desc = could not find container \"887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260\": container with ID starting with 887676562d0b3a6e2933bcafbf9953235bfd818557f178491b11c8454e3f7260 not found: ID does not exist" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.377997 
4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.393717 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.408516 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.409177 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-notification-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.409293 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-notification-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.409403 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="sg-core" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.409473 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="sg-core" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.409561 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="proxy-httpd" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.409635 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="proxy-httpd" Dec 09 17:19:44 crc kubenswrapper[4840]: E1209 17:19:44.409705 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-central-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.409765 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-central-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.410061 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="proxy-httpd" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.410160 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="sg-core" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.410227 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-central-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.410300 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" containerName="ceilometer-notification-agent" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.412339 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.416342 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.416780 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.416863 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp7bh\" (UniqueName: \"kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.416911 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.416976 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.417100 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.417190 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.417455 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.423510 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.426514 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519488 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519564 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519615 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519637 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519667 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp7bh\" (UniqueName: \"kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519726 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.519772 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.520035 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.520366 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.523004 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.523216 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.523291 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.527415 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.538795 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp7bh\" (UniqueName: \"kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh\") pod \"ceilometer-0\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " pod="openstack/ceilometer-0" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.649142 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fc4a13b-cbf7-4ecc-86bc-7ef530967ead" path="/var/lib/kubelet/pods/6fc4a13b-cbf7-4ecc-86bc-7ef530967ead/volumes" Dec 09 17:19:44 crc kubenswrapper[4840]: I1209 17:19:44.744047 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:19:45 crc kubenswrapper[4840]: I1209 17:19:45.257889 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:19:46 crc kubenswrapper[4840]: I1209 17:19:46.074492 4840 generic.go:334] "Generic (PLEG): container finished" podID="9eaf9500-68c1-4f32-a00e-693c221c4cb0" containerID="f856c0fb1f20af820ef9dfd98d73ce354e134758809cda0c796cd54cdd1f7705" exitCode=0 Dec 09 17:19:46 crc kubenswrapper[4840]: I1209 17:19:46.075034 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" event={"ID":"9eaf9500-68c1-4f32-a00e-693c221c4cb0","Type":"ContainerDied","Data":"f856c0fb1f20af820ef9dfd98d73ce354e134758809cda0c796cd54cdd1f7705"} Dec 09 17:19:46 crc kubenswrapper[4840]: I1209 17:19:46.077564 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerStarted","Data":"52e72cfb19db1cc28121ed9046e7996ed89051ecb6e7e82a38bbe641c145cf2a"} Dec 09 17:19:46 crc kubenswrapper[4840]: I1209 17:19:46.077606 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerStarted","Data":"9421e0718479cd7ae9f4f50cdb9010f0d34441319e0005734f340a34965a50c2"} Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.088692 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerStarted","Data":"36a573364ef107e928e9057bef7b05b1f72daca22d81528fdece7a579d4f2eb3"} Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.528895 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.689040 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvlk6\" (UniqueName: \"kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6\") pod \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.689379 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") pod \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.689480 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data\") pod \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.689818 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts\") pod \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.696429 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts" (OuterVolumeSpecName: "scripts") pod "9eaf9500-68c1-4f32-a00e-693c221c4cb0" (UID: "9eaf9500-68c1-4f32-a00e-693c221c4cb0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.702149 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6" (OuterVolumeSpecName: "kube-api-access-dvlk6") pod "9eaf9500-68c1-4f32-a00e-693c221c4cb0" (UID: "9eaf9500-68c1-4f32-a00e-693c221c4cb0"). InnerVolumeSpecName "kube-api-access-dvlk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:19:47 crc kubenswrapper[4840]: E1209 17:19:47.731268 4840 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle podName:9eaf9500-68c1-4f32-a00e-693c221c4cb0 nodeName:}" failed. No retries permitted until 2025-12-09 17:19:48.231241072 +0000 UTC m=+1374.222351705 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle") pod "9eaf9500-68c1-4f32-a00e-693c221c4cb0" (UID: "9eaf9500-68c1-4f32-a00e-693c221c4cb0") : error deleting /var/lib/kubelet/pods/9eaf9500-68c1-4f32-a00e-693c221c4cb0/volume-subpaths: remove /var/lib/kubelet/pods/9eaf9500-68c1-4f32-a00e-693c221c4cb0/volume-subpaths: no such file or directory Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.733249 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data" (OuterVolumeSpecName: "config-data") pod "9eaf9500-68c1-4f32-a00e-693c221c4cb0" (UID: "9eaf9500-68c1-4f32-a00e-693c221c4cb0"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.795736 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvlk6\" (UniqueName: \"kubernetes.io/projected/9eaf9500-68c1-4f32-a00e-693c221c4cb0-kube-api-access-dvlk6\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.797803 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:47 crc kubenswrapper[4840]: I1209 17:19:47.797830 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.100379 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerStarted","Data":"48540dacd1732366cf82cd600c22128adfc1dbd5bf924db8d85f35e18bb1007c"} Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.101840 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.102141 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bwmsc" event={"ID":"9eaf9500-68c1-4f32-a00e-693c221c4cb0","Type":"ContainerDied","Data":"03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465"} Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.102175 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03c188dc6601ee18d2688d5c8f02d8974ae4a707ad5101b281a01dd185972465" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.194862 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 09 17:19:48 crc kubenswrapper[4840]: E1209 17:19:48.195290 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eaf9500-68c1-4f32-a00e-693c221c4cb0" containerName="nova-cell0-conductor-db-sync" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.195306 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9eaf9500-68c1-4f32-a00e-693c221c4cb0" containerName="nova-cell0-conductor-db-sync" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.195525 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eaf9500-68c1-4f32-a00e-693c221c4cb0" containerName="nova-cell0-conductor-db-sync" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.200634 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.209142 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.306815 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") pod \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\" (UID: \"9eaf9500-68c1-4f32-a00e-693c221c4cb0\") " Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.307118 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z856f\" (UniqueName: \"kubernetes.io/projected/1a1d1a1a-f16c-4351-b65b-00322ce7929d-kube-api-access-z856f\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.307360 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.307514 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.310662 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9eaf9500-68c1-4f32-a00e-693c221c4cb0" (UID: "9eaf9500-68c1-4f32-a00e-693c221c4cb0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.409712 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z856f\" (UniqueName: \"kubernetes.io/projected/1a1d1a1a-f16c-4351-b65b-00322ce7929d-kube-api-access-z856f\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.409828 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.409869 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.409918 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eaf9500-68c1-4f32-a00e-693c221c4cb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.413980 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.418680 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a1d1a1a-f16c-4351-b65b-00322ce7929d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.426051 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z856f\" (UniqueName: \"kubernetes.io/projected/1a1d1a1a-f16c-4351-b65b-00322ce7929d-kube-api-access-z856f\") pod \"nova-cell0-conductor-0\" (UID: \"1a1d1a1a-f16c-4351-b65b-00322ce7929d\") " pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:48 crc kubenswrapper[4840]: I1209 17:19:48.570644 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:49 crc kubenswrapper[4840]: I1209 17:19:49.063295 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 09 17:19:49 crc kubenswrapper[4840]: I1209 17:19:49.112673 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1a1d1a1a-f16c-4351-b65b-00322ce7929d","Type":"ContainerStarted","Data":"287b233a7d3736b4f52a05f72164d6779ebe041fb5f9be8c5b3f3034d6bb796f"} Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.126792 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerStarted","Data":"bf76242ddfc463241c160b84973855eadb4f49e3200b6202aaad802a1fdfbc31"} Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.127039 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.129677 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1a1d1a1a-f16c-4351-b65b-00322ce7929d","Type":"ContainerStarted","Data":"91d661f3d06b27b7838307025748bf6732acbc12dfd693606e733386be5d9f47"} Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.129871 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.176009 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.443921696 podStartE2EDuration="6.17595523s" podCreationTimestamp="2025-12-09 17:19:44 +0000 UTC" firstStartedPulling="2025-12-09 17:19:45.257910292 +0000 UTC m=+1371.249020935" lastFinishedPulling="2025-12-09 17:19:48.989943836 +0000 UTC m=+1374.981054469" observedRunningTime="2025-12-09 17:19:50.151360942 +0000 UTC m=+1376.142471585" watchObservedRunningTime="2025-12-09 17:19:50.17595523 +0000 UTC m=+1376.167065883" Dec 09 17:19:50 crc kubenswrapper[4840]: I1209 17:19:50.188414 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.188367092 podStartE2EDuration="2.188367092s" podCreationTimestamp="2025-12-09 17:19:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:19:50.177403271 +0000 UTC m=+1376.168513944" watchObservedRunningTime="2025-12-09 17:19:50.188367092 +0000 UTC m=+1376.179477745" Dec 09 17:19:58 crc kubenswrapper[4840]: I1209 17:19:58.640708 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.100107 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-f4pbw"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.101788 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.107292 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.112441 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.114402 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-f4pbw"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.223820 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.223891 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.224827 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vhhp\" (UniqueName: \"kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.225382 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.320957 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.324475 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.327171 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.327217 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.327286 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vhhp\" (UniqueName: \"kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.327314 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.331590 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.339078 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.340286 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.361796 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.366707 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.399801 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vhhp\" (UniqueName: \"kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp\") pod \"nova-cell0-cell-mapping-f4pbw\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.429472 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qvp5\" (UniqueName: \"kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.429573 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.429624 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.429671 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.473095 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.474468 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.486482 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.502208 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.531380 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.531461 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qvp5\" (UniqueName: \"kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.531536 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.531586 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.532863 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.535526 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.546512 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.555890 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.566319 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.570593 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qvp5\" (UniqueName: \"kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.580710 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.628179 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633159 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633206 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633225 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633270 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fc8p\" (UniqueName: \"kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633304 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633336 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hd77\" (UniqueName: 
\"kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.633377 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.662710 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.713400 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.715491 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.719175 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735459 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735755 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735775 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735847 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fc8p\" (UniqueName: \"kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735890 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735931 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hd77\" (UniqueName: \"kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.735988 4840 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.740896 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.741610 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.741682 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.744700 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.745047 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.750196 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.752012 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.758160 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fc8p\" (UniqueName: \"kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p\") pod \"nova-metadata-0\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " pod="openstack/nova-metadata-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.762491 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hd77\" (UniqueName: \"kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77\") pod \"nova-scheduler-0\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.774565 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.798434 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.810082 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.828614 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.840471 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.840533 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrwcr\" (UniqueName: \"kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.840575 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.943763 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.943847 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.943985 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944015 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944044 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6skb7\" (UniqueName: \"kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944073 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944097 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944160 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrwcr\" (UniqueName: \"kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.944195 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.949808 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.950730 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:19:59 crc kubenswrapper[4840]: I1209 17:19:59.965523 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrwcr\" (UniqueName: \"kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr\") pod \"nova-cell1-novncproxy-0\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.017856 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.035763 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046122 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046199 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046310 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046337 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046364 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6skb7\" (UniqueName: \"kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.046393 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.048220 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.048430 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.048944 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc 
kubenswrapper[4840]: I1209 17:20:00.050525 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.053614 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.083128 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6skb7\" (UniqueName: \"kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7\") pod \"dnsmasq-dns-7c9cb78d75-tvj7t\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.092815 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.193427 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-f4pbw"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.249247 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f4pbw" event={"ID":"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb","Type":"ContainerStarted","Data":"e690d33f1dcbe85482ae0fb7cbf1d3e2edaa01ede558244a7384d9cf1569b4f8"} Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.430947 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-64jbl"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.432726 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.435953 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.436236 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.447647 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-64jbl"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.482927 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.513331 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.557885 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn4dg\" (UniqueName: \"kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.557985 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.558063 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.558084 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.660388 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.660514 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.660542 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.661290 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn4dg\" (UniqueName: \"kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.665203 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.665913 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.665987 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.681549 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn4dg\" (UniqueName: \"kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg\") pod \"nova-cell1-conductor-db-sync-64jbl\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.708315 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.866494 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.891104 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:00 crc kubenswrapper[4840]: I1209 17:20:00.937823 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.265180 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"426cbf78-2ebf-4210-a349-e744a967b288","Type":"ContainerStarted","Data":"d8275abb0e6f37ead9605235a9722fe5d00ba122f671691d06e9a50f2f572224"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.279990 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerStarted","Data":"bccb28de6559df1e2f947dafcaae1b9b780928af798b6f27a79e3bf20cbac98f"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.283130 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97070aef-f135-4c60-9cae-21a0ae49e77c","Type":"ContainerStarted","Data":"52f73516de704e4cc7e4c23d64d586602597855d17add269d61d5204f86ae965"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.285828 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerStarted","Data":"de4935ed724ff595bc8d7d52d7a74470b756f735390e1537cb48d86b40f66e1d"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.304681 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f4pbw" event={"ID":"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb","Type":"ContainerStarted","Data":"be0aff0214ef7a52186961dc7d97cf3fd19600c913b98730b0dff858c95e82a0"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.321370 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-f4pbw" podStartSLOduration=2.3213534239999998 podStartE2EDuration="2.321353424s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:01.321080777 +0000 UTC m=+1387.312191410" watchObservedRunningTime="2025-12-09 17:20:01.321353424 +0000 UTC m=+1387.312464057" Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.323029 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" event={"ID":"6782d20a-7636-4868-a203-ef8fbf37a6c9","Type":"ContainerStarted","Data":"d9982f77d139e4b9b4d33c79f68abe665bd83303dafa3e86656fda9ae42e2d64"} Dec 09 17:20:01 crc kubenswrapper[4840]: I1209 17:20:01.667069 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-64jbl"] Dec 09 17:20:02 crc kubenswrapper[4840]: I1209 17:20:02.353601 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-64jbl" event={"ID":"ce552d37-bf06-4a84-a9a0-111ad1b9698b","Type":"ContainerStarted","Data":"7f7ccb7739c4654a9cb30f9feceba39ea3c5cdb5bb82448b91b77747d2d8188a"} Dec 09 17:20:02 crc kubenswrapper[4840]: I1209 17:20:02.353941 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-64jbl" event={"ID":"ce552d37-bf06-4a84-a9a0-111ad1b9698b","Type":"ContainerStarted","Data":"0e13e06933d577b290c4d6f178e98362c04a5a9f7eca85f92dad0ef0629c385b"} Dec 09 17:20:02 crc kubenswrapper[4840]: I1209 17:20:02.357984 4840 generic.go:334] "Generic (PLEG): container finished" podID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerID="c5c8e5335c2415386f0615edf91188ab581f383408e7b1f25904ba2b64799181" 
exitCode=0 Dec 09 17:20:02 crc kubenswrapper[4840]: I1209 17:20:02.361054 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" event={"ID":"6782d20a-7636-4868-a203-ef8fbf37a6c9","Type":"ContainerDied","Data":"c5c8e5335c2415386f0615edf91188ab581f383408e7b1f25904ba2b64799181"} Dec 09 17:20:02 crc kubenswrapper[4840]: I1209 17:20:02.403736 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-64jbl" podStartSLOduration=2.403720507 podStartE2EDuration="2.403720507s" podCreationTimestamp="2025-12-09 17:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:02.379234933 +0000 UTC m=+1388.370345566" watchObservedRunningTime="2025-12-09 17:20:02.403720507 +0000 UTC m=+1388.394831140" Dec 09 17:20:03 crc kubenswrapper[4840]: I1209 17:20:03.255292 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:03 crc kubenswrapper[4840]: I1209 17:20:03.269493 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.392621 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" event={"ID":"6782d20a-7636-4868-a203-ef8fbf37a6c9","Type":"ContainerStarted","Data":"b893b05e04e921106f02e79732ef58560a046576a6c901371ed7ef88cfafa644"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.393284 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.394923 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"426cbf78-2ebf-4210-a349-e744a967b288","Type":"ContainerStarted","Data":"c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.398355 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerStarted","Data":"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.398393 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerStarted","Data":"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.400601 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97070aef-f135-4c60-9cae-21a0ae49e77c","Type":"ContainerStarted","Data":"dfac38e9bea7315daa1778d1b6ca204d863767c42a7eb63a6e83418654c03727"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.400718 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="97070aef-f135-4c60-9cae-21a0ae49e77c" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://dfac38e9bea7315daa1778d1b6ca204d863767c42a7eb63a6e83418654c03727" gracePeriod=30 Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.403559 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerStarted","Data":"f050a7733a656aa215535651b2bb521742bad606f66b2d5df288daed44e89b03"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.403599 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerStarted","Data":"60a5bc46dd93e8d35dcb489a115f6532bd932b5c75a23866ba58cdd480a6b648"} Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.403710 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-log" containerID="cri-o://60a5bc46dd93e8d35dcb489a115f6532bd932b5c75a23866ba58cdd480a6b648" gracePeriod=30 Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.403794 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-metadata" containerID="cri-o://f050a7733a656aa215535651b2bb521742bad606f66b2d5df288daed44e89b03" gracePeriod=30 Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.414809 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" podStartSLOduration=6.414788521 podStartE2EDuration="6.414788521s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:05.409987624 +0000 UTC m=+1391.401098257" watchObservedRunningTime="2025-12-09 17:20:05.414788521 +0000 UTC m=+1391.405899154" Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.450724 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.700892781 podStartE2EDuration="6.450455742s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="2025-12-09 17:20:00.882581048 +0000 UTC m=+1386.873691681" lastFinishedPulling="2025-12-09 17:20:04.632144009 +0000 UTC m=+1390.623254642" observedRunningTime="2025-12-09 17:20:05.433133181 +0000 UTC m=+1391.424243814" watchObservedRunningTime="2025-12-09 17:20:05.450455742 +0000 UTC m=+1391.441566385" Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.467518 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.407945861 podStartE2EDuration="6.467497266s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="2025-12-09 17:20:00.515015911 +0000 UTC m=+1386.506126544" lastFinishedPulling="2025-12-09 17:20:04.574567306 +0000 UTC m=+1390.565677949" observedRunningTime="2025-12-09 17:20:05.453466688 +0000 UTC m=+1391.444577311" watchObservedRunningTime="2025-12-09 17:20:05.467497266 +0000 UTC m=+1391.458607909" Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.484036 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.591345733 podStartE2EDuration="6.484016684s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="2025-12-09 17:20:00.727255732 +0000 UTC m=+1386.718366365" lastFinishedPulling="2025-12-09 17:20:04.619926683 +0000 UTC m=+1390.611037316" observedRunningTime="2025-12-09 17:20:05.478687363 +0000 UTC m=+1391.469798006" watchObservedRunningTime="2025-12-09 17:20:05.484016684 +0000 
UTC m=+1391.475127317"
Dec 09 17:20:05 crc kubenswrapper[4840]: I1209 17:20:05.532794 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.3700895060000002 podStartE2EDuration="6.532771487s" podCreationTimestamp="2025-12-09 17:19:59 +0000 UTC" firstStartedPulling="2025-12-09 17:20:00.474267745 +0000 UTC m=+1386.465378378" lastFinishedPulling="2025-12-09 17:20:04.636949706 +0000 UTC m=+1390.628060359" observedRunningTime="2025-12-09 17:20:05.513235513 +0000 UTC m=+1391.504346146" watchObservedRunningTime="2025-12-09 17:20:05.532771487 +0000 UTC m=+1391.523882130"
Dec 09 17:20:06 crc kubenswrapper[4840]: I1209 17:20:06.415791 4840 generic.go:334] "Generic (PLEG): container finished" podID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerID="60a5bc46dd93e8d35dcb489a115f6532bd932b5c75a23866ba58cdd480a6b648" exitCode=143
Dec 09 17:20:06 crc kubenswrapper[4840]: I1209 17:20:06.415880 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerDied","Data":"60a5bc46dd93e8d35dcb489a115f6532bd932b5c75a23866ba58cdd480a6b648"}
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.464516 4840 generic.go:334] "Generic (PLEG): container finished" podID="4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" containerID="be0aff0214ef7a52186961dc7d97cf3fd19600c913b98730b0dff858c95e82a0" exitCode=0
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.465198 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f4pbw" event={"ID":"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb","Type":"ContainerDied","Data":"be0aff0214ef7a52186961dc7d97cf3fd19600c913b98730b0dff858c95e82a0"}
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.810673 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.810741 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.830205 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.830258 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 09 17:20:09 crc kubenswrapper[4840]: I1209 17:20:09.864699 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.018701 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.018755 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.036727 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.095192 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.183574 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"]
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.183829 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="dnsmasq-dns" containerID="cri-o://c7bae538a1761ed67fd1d8c5b8a3afebfaca317d475d3bce7cc691a15f4ddb6f" gracePeriod=10
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.504816 4840 generic.go:334] "Generic (PLEG): container finished" podID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerID="c7bae538a1761ed67fd1d8c5b8a3afebfaca317d475d3bce7cc691a15f4ddb6f" exitCode=0
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.505254 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" event={"ID":"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f","Type":"ContainerDied","Data":"c7bae538a1761ed67fd1d8c5b8a3afebfaca317d475d3bce7cc691a15f4ddb6f"}
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.510221 4840 generic.go:334] "Generic (PLEG): container finished" podID="ce552d37-bf06-4a84-a9a0-111ad1b9698b" containerID="7f7ccb7739c4654a9cb30f9feceba39ea3c5cdb5bb82448b91b77747d2d8188a" exitCode=0
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.511088 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-64jbl" event={"ID":"ce552d37-bf06-4a84-a9a0-111ad1b9698b","Type":"ContainerDied","Data":"7f7ccb7739c4654a9cb30f9feceba39ea3c5cdb5bb82448b91b77747d2d8188a"}
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.558250 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.900519 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.901335 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:20:10 crc kubenswrapper[4840]: I1209 17:20:10.906975 4840 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019354 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019513 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019554 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019571 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrfqr\" (UniqueName: \"kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019626 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.019689 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config\") pod \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\" (UID: \"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.052220 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr" (OuterVolumeSpecName: "kube-api-access-lrfqr") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "kube-api-access-lrfqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.111654 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.121861 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.121898 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrfqr\" (UniqueName: \"kubernetes.io/projected/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-kube-api-access-lrfqr\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.125471 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.130707 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config" (OuterVolumeSpecName: "config") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.135508 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.173694 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" (UID: "83d7cd4e-4c2a-4a12-aa7d-44266ec4139f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.177289 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.223505 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.223536 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.223547 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.223556 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.324376 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vhhp\" (UniqueName: \"kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp\") pod \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.324496 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts\") pod \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.324544 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data\") pod \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.324750 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle\") pod \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\" (UID: \"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb\") " Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.331070 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts" (OuterVolumeSpecName: "scripts") pod "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" (UID: "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.333362 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp" (OuterVolumeSpecName: "kube-api-access-5vhhp") pod "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" (UID: "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb"). InnerVolumeSpecName "kube-api-access-5vhhp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.354081 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" (UID: "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.367577 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data" (OuterVolumeSpecName: "config-data") pod "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" (UID: "4c371bcf-af55-4ef1-a3ad-172b2db7bfbb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.426725 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vhhp\" (UniqueName: \"kubernetes.io/projected/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-kube-api-access-5vhhp\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.426759 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.426769 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.426778 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.524081 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" event={"ID":"83d7cd4e-4c2a-4a12-aa7d-44266ec4139f","Type":"ContainerDied","Data":"a90812096ec6b9e6278eea106c4aeca255da5be8e20df554e8f1a7116b8801f5"} Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.524157 4840 scope.go:117] "RemoveContainer" containerID="c7bae538a1761ed67fd1d8c5b8a3afebfaca317d475d3bce7cc691a15f4ddb6f" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.524185 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-d5bzm" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.525785 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f4pbw" event={"ID":"4c371bcf-af55-4ef1-a3ad-172b2db7bfbb","Type":"ContainerDied","Data":"e690d33f1dcbe85482ae0fb7cbf1d3e2edaa01ede558244a7384d9cf1569b4f8"} Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.525831 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f4pbw" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.525846 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e690d33f1dcbe85482ae0fb7cbf1d3e2edaa01ede558244a7384d9cf1569b4f8" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.559565 4840 scope.go:117] "RemoveContainer" containerID="973aaa1c5dfce10cd0c3b0b0d914e087daab3e60f2cdd4ae7bb3cd0b835077fd" Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.583849 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"] Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.595270 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-d5bzm"] Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.643412 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.643637 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-log" containerID="cri-o://038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607" gracePeriod=30 Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.644111 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-api" containerID="cri-o://bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531" gracePeriod=30 Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.817865 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:11 crc kubenswrapper[4840]: I1209 17:20:11.965883 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.063683 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data\") pod \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.063729 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gn4dg\" (UniqueName: \"kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg\") pod \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.063772 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts\") pod \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.063952 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle\") pod \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\" (UID: \"ce552d37-bf06-4a84-a9a0-111ad1b9698b\") " Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.071193 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg" (OuterVolumeSpecName: "kube-api-access-gn4dg") pod "ce552d37-bf06-4a84-a9a0-111ad1b9698b" (UID: "ce552d37-bf06-4a84-a9a0-111ad1b9698b"). InnerVolumeSpecName "kube-api-access-gn4dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.076370 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts" (OuterVolumeSpecName: "scripts") pod "ce552d37-bf06-4a84-a9a0-111ad1b9698b" (UID: "ce552d37-bf06-4a84-a9a0-111ad1b9698b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.100672 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce552d37-bf06-4a84-a9a0-111ad1b9698b" (UID: "ce552d37-bf06-4a84-a9a0-111ad1b9698b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.101075 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data" (OuterVolumeSpecName: "config-data") pod "ce552d37-bf06-4a84-a9a0-111ad1b9698b" (UID: "ce552d37-bf06-4a84-a9a0-111ad1b9698b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.166476 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gn4dg\" (UniqueName: \"kubernetes.io/projected/ce552d37-bf06-4a84-a9a0-111ad1b9698b-kube-api-access-gn4dg\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.166522 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.166541 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.166553 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce552d37-bf06-4a84-a9a0-111ad1b9698b-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.537331 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-64jbl" event={"ID":"ce552d37-bf06-4a84-a9a0-111ad1b9698b","Type":"ContainerDied","Data":"0e13e06933d577b290c4d6f178e98362c04a5a9f7eca85f92dad0ef0629c385b"} Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.537642 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e13e06933d577b290c4d6f178e98362c04a5a9f7eca85f92dad0ef0629c385b" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.537379 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-64jbl" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.542471 4840 generic.go:334] "Generic (PLEG): container finished" podID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerID="038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607" exitCode=143 Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.542528 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerDied","Data":"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607"} Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.543935 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="426cbf78-2ebf-4210-a349-e744a967b288" containerName="nova-scheduler-scheduler" containerID="cri-o://c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" gracePeriod=30 Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.655933 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" path="/var/lib/kubelet/pods/83d7cd4e-4c2a-4a12-aa7d-44266ec4139f/volumes" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.664110 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 17:20:12 crc kubenswrapper[4840]: E1209 17:20:12.664711 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" containerName="nova-manage" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.664727 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" 
containerName="nova-manage" Dec 09 17:20:12 crc kubenswrapper[4840]: E1209 17:20:12.664751 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="init" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.664757 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="init" Dec 09 17:20:12 crc kubenswrapper[4840]: E1209 17:20:12.664772 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="dnsmasq-dns" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.664778 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="dnsmasq-dns" Dec 09 17:20:12 crc kubenswrapper[4840]: E1209 17:20:12.664793 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce552d37-bf06-4a84-a9a0-111ad1b9698b" containerName="nova-cell1-conductor-db-sync" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.664799 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce552d37-bf06-4a84-a9a0-111ad1b9698b" containerName="nova-cell1-conductor-db-sync" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.665111 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" containerName="nova-manage" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.665136 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="83d7cd4e-4c2a-4a12-aa7d-44266ec4139f" containerName="dnsmasq-dns" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.665145 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce552d37-bf06-4a84-a9a0-111ad1b9698b" containerName="nova-cell1-conductor-db-sync" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.665846 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.669387 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.676975 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.777160 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nc7\" (UniqueName: \"kubernetes.io/projected/5bd10e5c-6713-40dc-b744-349603d760f6-kube-api-access-t2nc7\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.777255 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.777338 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.878807 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.878928 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.879074 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nc7\" (UniqueName: \"kubernetes.io/projected/5bd10e5c-6713-40dc-b744-349603d760f6-kube-api-access-t2nc7\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.884607 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.885324 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd10e5c-6713-40dc-b744-349603d760f6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.897542 4840 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nc7\" (UniqueName: \"kubernetes.io/projected/5bd10e5c-6713-40dc-b744-349603d760f6-kube-api-access-t2nc7\") pod \"nova-cell1-conductor-0\" (UID: \"5bd10e5c-6713-40dc-b744-349603d760f6\") " pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:12 crc kubenswrapper[4840]: I1209 17:20:12.994713 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:13 crc kubenswrapper[4840]: W1209 17:20:13.503589 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bd10e5c_6713_40dc_b744_349603d760f6.slice/crio-89cd52dddaa81cd80e7363c51c399fa814bc867edd863d7b4eb25cdb289028bc WatchSource:0}: Error finding container 89cd52dddaa81cd80e7363c51c399fa814bc867edd863d7b4eb25cdb289028bc: Status 404 returned error can't find the container with id 89cd52dddaa81cd80e7363c51c399fa814bc867edd863d7b4eb25cdb289028bc Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.503642 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.559290 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5bd10e5c-6713-40dc-b744-349603d760f6","Type":"ContainerStarted","Data":"89cd52dddaa81cd80e7363c51c399fa814bc867edd863d7b4eb25cdb289028bc"} Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.813821 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.816097 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.849476 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.903180 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfvb5\" (UniqueName: \"kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.903431 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:13 crc kubenswrapper[4840]: I1209 17:20:13.903485 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.004832 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfvb5\" (UniqueName: \"kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.004881 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.004938 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.005599 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.005630 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.027155 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-kfvb5\" (UniqueName: \"kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5\") pod \"redhat-operators-pl44l\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.170184 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.574110 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5bd10e5c-6713-40dc-b744-349603d760f6","Type":"ContainerStarted","Data":"f7349373ae086903c35f77d3da7e30d9e8f0615ba0f1f39742dd5bd3b4dc120f"} Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.575831 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.603557 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.603539792 podStartE2EDuration="2.603539792s" podCreationTimestamp="2025-12-09 17:20:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:14.592051046 +0000 UTC m=+1400.583161669" watchObservedRunningTime="2025-12-09 17:20:14.603539792 +0000 UTC m=+1400.594650425" Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.730510 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:14 crc kubenswrapper[4840]: I1209 17:20:14.771009 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 09 17:20:14 crc kubenswrapper[4840]: E1209 17:20:14.835619 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:20:14 crc kubenswrapper[4840]: E1209 17:20:14.846807 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:20:14 crc kubenswrapper[4840]: E1209 17:20:14.848518 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:20:14 crc kubenswrapper[4840]: E1209 17:20:14.848551 4840 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="426cbf78-2ebf-4210-a349-e744a967b288" containerName="nova-scheduler-scheduler" Dec 09 17:20:15 crc kubenswrapper[4840]: I1209 17:20:15.586017 4840 generic.go:334] "Generic (PLEG): container finished" podID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" 
containerID="9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314" exitCode=0 Dec 09 17:20:15 crc kubenswrapper[4840]: I1209 17:20:15.586080 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerDied","Data":"9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314"} Dec 09 17:20:15 crc kubenswrapper[4840]: I1209 17:20:15.586410 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerStarted","Data":"0b3a1148e2ccd629f3e51dc7116d1478c086851898327d8b47b79888bb65f337"} Dec 09 17:20:16 crc kubenswrapper[4840]: I1209 17:20:16.601865 4840 generic.go:334] "Generic (PLEG): container finished" podID="426cbf78-2ebf-4210-a349-e744a967b288" containerID="c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" exitCode=0 Dec 09 17:20:16 crc kubenswrapper[4840]: I1209 17:20:16.602112 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"426cbf78-2ebf-4210-a349-e744a967b288","Type":"ContainerDied","Data":"c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b"} Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.066852 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.179179 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle\") pod \"426cbf78-2ebf-4210-a349-e744a967b288\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.179243 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data\") pod \"426cbf78-2ebf-4210-a349-e744a967b288\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.179405 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hd77\" (UniqueName: \"kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77\") pod \"426cbf78-2ebf-4210-a349-e744a967b288\" (UID: \"426cbf78-2ebf-4210-a349-e744a967b288\") " Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.185907 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77" (OuterVolumeSpecName: "kube-api-access-2hd77") pod "426cbf78-2ebf-4210-a349-e744a967b288" (UID: "426cbf78-2ebf-4210-a349-e744a967b288"). InnerVolumeSpecName "kube-api-access-2hd77". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.206611 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data" (OuterVolumeSpecName: "config-data") pod "426cbf78-2ebf-4210-a349-e744a967b288" (UID: "426cbf78-2ebf-4210-a349-e744a967b288"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.208095 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "426cbf78-2ebf-4210-a349-e744a967b288" (UID: "426cbf78-2ebf-4210-a349-e744a967b288"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.281398 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hd77\" (UniqueName: \"kubernetes.io/projected/426cbf78-2ebf-4210-a349-e744a967b288-kube-api-access-2hd77\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.281431 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.281441 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/426cbf78-2ebf-4210-a349-e744a967b288-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.617673 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"426cbf78-2ebf-4210-a349-e744a967b288","Type":"ContainerDied","Data":"d8275abb0e6f37ead9605235a9722fe5d00ba122f671691d06e9a50f2f572224"} Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.617739 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.618052 4840 scope.go:117] "RemoveContainer" containerID="c290089d2cddba2de1dcebcb2b1a9562357e44839af9b8cb9e6daf6ff2ef5f2b" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.621071 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerStarted","Data":"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3"} Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.706533 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.719025 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.729120 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:17 crc kubenswrapper[4840]: E1209 17:20:17.729746 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="426cbf78-2ebf-4210-a349-e744a967b288" containerName="nova-scheduler-scheduler" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.729773 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="426cbf78-2ebf-4210-a349-e744a967b288" containerName="nova-scheduler-scheduler" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.730056 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="426cbf78-2ebf-4210-a349-e744a967b288" containerName="nova-scheduler-scheduler" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.730983 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.732791 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.740881 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.799931 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.800134 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7wsb\" (UniqueName: \"kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.800181 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.900796 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7wsb\" (UniqueName: \"kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.900854 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.900908 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.918747 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.918835 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:17 crc kubenswrapper[4840]: I1209 17:20:17.929597 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7wsb\" (UniqueName: 
\"kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb\") pod \"nova-scheduler-0\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " pod="openstack/nova-scheduler-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.050847 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.242235 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.313332 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle\") pod \"c99bfc51-b110-4fca-a9eb-cf517044c149\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.313628 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs\") pod \"c99bfc51-b110-4fca-a9eb-cf517044c149\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.313732 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data\") pod \"c99bfc51-b110-4fca-a9eb-cf517044c149\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.313773 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qvp5\" (UniqueName: \"kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5\") pod \"c99bfc51-b110-4fca-a9eb-cf517044c149\" (UID: \"c99bfc51-b110-4fca-a9eb-cf517044c149\") " Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.314537 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs" (OuterVolumeSpecName: "logs") pod "c99bfc51-b110-4fca-a9eb-cf517044c149" (UID: "c99bfc51-b110-4fca-a9eb-cf517044c149"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.319319 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5" (OuterVolumeSpecName: "kube-api-access-5qvp5") pod "c99bfc51-b110-4fca-a9eb-cf517044c149" (UID: "c99bfc51-b110-4fca-a9eb-cf517044c149"). InnerVolumeSpecName "kube-api-access-5qvp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.350500 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c99bfc51-b110-4fca-a9eb-cf517044c149" (UID: "c99bfc51-b110-4fca-a9eb-cf517044c149"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.365377 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data" (OuterVolumeSpecName: "config-data") pod "c99bfc51-b110-4fca-a9eb-cf517044c149" (UID: "c99bfc51-b110-4fca-a9eb-cf517044c149"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.416033 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.416065 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c99bfc51-b110-4fca-a9eb-cf517044c149-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.416077 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c99bfc51-b110-4fca-a9eb-cf517044c149-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.416087 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qvp5\" (UniqueName: \"kubernetes.io/projected/c99bfc51-b110-4fca-a9eb-cf517044c149-kube-api-access-5qvp5\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.547604 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.652550 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="426cbf78-2ebf-4210-a349-e744a967b288" path="/var/lib/kubelet/pods/426cbf78-2ebf-4210-a349-e744a967b288/volumes" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.671252 4840 generic.go:334] "Generic (PLEG): container finished" podID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerID="bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531" exitCode=0 Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.671319 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerDied","Data":"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531"} Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.671816 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.671949 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c99bfc51-b110-4fca-a9eb-cf517044c149","Type":"ContainerDied","Data":"bccb28de6559df1e2f947dafcaae1b9b780928af798b6f27a79e3bf20cbac98f"} Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.671994 4840 scope.go:117] "RemoveContainer" containerID="bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.680370 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8","Type":"ContainerStarted","Data":"eda4216ef8ad7aabb0d2abb88111da701af148eda8370800c86ea9e77bd32531"} Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.745156 4840 scope.go:117] "RemoveContainer" containerID="038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.778667 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.800790 4840 scope.go:117] "RemoveContainer" containerID="bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.800918 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:18 crc kubenswrapper[4840]: E1209 17:20:18.804720 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531\": container with ID starting with bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531 not found: ID does not exist" containerID="bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.804766 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531"} err="failed to get container status \"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531\": rpc error: code = NotFound desc = could not find container \"bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531\": container with ID starting with bc6b8bf9c43aa0c8af95ddfd8fb96522836ac9c1f6e04cfbc0d1a2ad6b687531 not found: ID does not exist" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.804796 4840 scope.go:117] "RemoveContainer" containerID="038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607" Dec 09 17:20:18 crc kubenswrapper[4840]: E1209 17:20:18.808205 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607\": container with ID starting with 038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607 not found: ID does not exist" containerID="038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.808235 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607"} err="failed to get container status \"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607\": rpc error: code = NotFound desc = 
could not find container \"038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607\": container with ID starting with 038caf1b2ec858be462a916df461098cd6cc6269c98180848112bba37a32e607 not found: ID does not exist" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.811019 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:18 crc kubenswrapper[4840]: E1209 17:20:18.811529 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-log" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.811546 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-log" Dec 09 17:20:18 crc kubenswrapper[4840]: E1209 17:20:18.811563 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-api" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.811570 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-api" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.811755 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-log" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.811775 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" containerName="nova-api-api" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.812892 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.816617 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.820655 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.854783 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75n6r\" (UniqueName: \"kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.854886 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.855017 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.855051 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 
17:20:18.957592 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.957658 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.957779 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75n6r\" (UniqueName: \"kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.957882 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.958012 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.962621 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.972359 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:18 crc kubenswrapper[4840]: I1209 17:20:18.975684 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75n6r\" (UniqueName: \"kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r\") pod \"nova-api-0\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " pod="openstack/nova-api-0" Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.010017 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.010217 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="13330488-1e93-4a88-8f15-331ee0b935cf" containerName="kube-state-metrics" containerID="cri-o://8c95a524b9ede1e17b6b9134c9b6fa5150668a10dbdf64d70f20c22e76020e1d" gracePeriod=30 Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.131121 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.637106 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.691081 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerStarted","Data":"f0adf097bf5f520207a7cbfa51ccb01da443f332b5438e24026be8e2b0122890"} Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.693773 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8","Type":"ContainerStarted","Data":"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54"} Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.695883 4840 generic.go:334] "Generic (PLEG): container finished" podID="13330488-1e93-4a88-8f15-331ee0b935cf" containerID="8c95a524b9ede1e17b6b9134c9b6fa5150668a10dbdf64d70f20c22e76020e1d" exitCode=2 Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.695974 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"13330488-1e93-4a88-8f15-331ee0b935cf","Type":"ContainerDied","Data":"8c95a524b9ede1e17b6b9134c9b6fa5150668a10dbdf64d70f20c22e76020e1d"} Dec 09 17:20:19 crc kubenswrapper[4840]: I1209 17:20:19.719395 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.7193725 podStartE2EDuration="2.7193725s" podCreationTimestamp="2025-12-09 17:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:19.712814774 +0000 UTC m=+1405.703925407" watchObservedRunningTime="2025-12-09 17:20:19.7193725 +0000 UTC m=+1405.710483133" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.531943 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.598728 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbpb2\" (UniqueName: \"kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2\") pod \"13330488-1e93-4a88-8f15-331ee0b935cf\" (UID: \"13330488-1e93-4a88-8f15-331ee0b935cf\") " Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.605361 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2" (OuterVolumeSpecName: "kube-api-access-kbpb2") pod "13330488-1e93-4a88-8f15-331ee0b935cf" (UID: "13330488-1e93-4a88-8f15-331ee0b935cf"). InnerVolumeSpecName "kube-api-access-kbpb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.619648 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c99bfc51-b110-4fca-a9eb-cf517044c149" path="/var/lib/kubelet/pods/c99bfc51-b110-4fca-a9eb-cf517044c149/volumes" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.706873 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.707010 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"13330488-1e93-4a88-8f15-331ee0b935cf","Type":"ContainerDied","Data":"6274cdbe34fbcdb610c4c75f363a6dcd672e44140554acd80c3ebd84a5519342"} Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.707051 4840 scope.go:117] "RemoveContainer" containerID="8c95a524b9ede1e17b6b9134c9b6fa5150668a10dbdf64d70f20c22e76020e1d" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.713778 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbpb2\" (UniqueName: \"kubernetes.io/projected/13330488-1e93-4a88-8f15-331ee0b935cf-kube-api-access-kbpb2\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.716645 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerStarted","Data":"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33"} Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.876356 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.895939 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.921922 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:20 crc kubenswrapper[4840]: E1209 17:20:20.922458 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13330488-1e93-4a88-8f15-331ee0b935cf" containerName="kube-state-metrics" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.922479 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="13330488-1e93-4a88-8f15-331ee0b935cf" containerName="kube-state-metrics" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.922649 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="13330488-1e93-4a88-8f15-331ee0b935cf" containerName="kube-state-metrics" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.923426 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.930442 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.930875 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 09 17:20:20 crc kubenswrapper[4840]: I1209 17:20:20.932929 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.020142 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.020409 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.020475 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.020860 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w56dl\" (UniqueName: \"kubernetes.io/projected/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-api-access-w56dl\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.122565 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.122681 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.122717 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.122866 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w56dl\" 
(UniqueName: \"kubernetes.io/projected/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-api-access-w56dl\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.126723 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.126849 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.153038 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.178379 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w56dl\" (UniqueName: \"kubernetes.io/projected/4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9-kube-api-access-w56dl\") pod \"kube-state-metrics-0\" (UID: \"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9\") " pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.193007 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.193817 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-central-agent" containerID="cri-o://52e72cfb19db1cc28121ed9046e7996ed89051ecb6e7e82a38bbe641c145cf2a" gracePeriod=30 Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.194405 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="proxy-httpd" containerID="cri-o://bf76242ddfc463241c160b84973855eadb4f49e3200b6202aaad802a1fdfbc31" gracePeriod=30 Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.194549 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-notification-agent" containerID="cri-o://36a573364ef107e928e9057bef7b05b1f72daca22d81528fdece7a579d4f2eb3" gracePeriod=30 Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.194594 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="sg-core" containerID="cri-o://48540dacd1732366cf82cd600c22128adfc1dbd5bf924db8d85f35e18bb1007c" gracePeriod=30 Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.248420 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 17:20:21 crc kubenswrapper[4840]: I1209 17:20:21.729132 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.622921 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13330488-1e93-4a88-8f15-331ee0b935cf" path="/var/lib/kubelet/pods/13330488-1e93-4a88-8f15-331ee0b935cf/volumes" Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.742285 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9","Type":"ContainerStarted","Data":"464897acc4fdfa35c9f08efcdfa4e2cc57971183d4c805fbe21dbbe1792f5bac"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.742329 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9","Type":"ContainerStarted","Data":"e437f0eaeeb5e5dc8aa983292a7b6734637d69f785d341bbf36d3b6264c348a8"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.743508 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.745747 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerStarted","Data":"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748740 4840 generic.go:334] "Generic (PLEG): container finished" podID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerID="bf76242ddfc463241c160b84973855eadb4f49e3200b6202aaad802a1fdfbc31" exitCode=0 Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748771 4840 generic.go:334] "Generic (PLEG): container finished" podID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerID="48540dacd1732366cf82cd600c22128adfc1dbd5bf924db8d85f35e18bb1007c" exitCode=2 Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748783 4840 generic.go:334] "Generic (PLEG): container finished" podID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerID="36a573364ef107e928e9057bef7b05b1f72daca22d81528fdece7a579d4f2eb3" exitCode=0 Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748791 4840 generic.go:334] "Generic (PLEG): container finished" podID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerID="52e72cfb19db1cc28121ed9046e7996ed89051ecb6e7e82a38bbe641c145cf2a" exitCode=0 Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748824 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerDied","Data":"bf76242ddfc463241c160b84973855eadb4f49e3200b6202aaad802a1fdfbc31"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748842 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerDied","Data":"48540dacd1732366cf82cd600c22128adfc1dbd5bf924db8d85f35e18bb1007c"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748851 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerDied","Data":"36a573364ef107e928e9057bef7b05b1f72daca22d81528fdece7a579d4f2eb3"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.748859 4840 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerDied","Data":"52e72cfb19db1cc28121ed9046e7996ed89051ecb6e7e82a38bbe641c145cf2a"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.750981 4840 generic.go:334] "Generic (PLEG): container finished" podID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerID="cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3" exitCode=0 Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.751018 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerDied","Data":"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3"} Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.766793 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.390388747 podStartE2EDuration="2.766775254s" podCreationTimestamp="2025-12-09 17:20:20 +0000 UTC" firstStartedPulling="2025-12-09 17:20:21.73938559 +0000 UTC m=+1407.730496223" lastFinishedPulling="2025-12-09 17:20:22.115772087 +0000 UTC m=+1408.106882730" observedRunningTime="2025-12-09 17:20:22.762127542 +0000 UTC m=+1408.753238195" watchObservedRunningTime="2025-12-09 17:20:22.766775254 +0000 UTC m=+1408.757885887" Dec 09 17:20:22 crc kubenswrapper[4840]: I1209 17:20:22.796702 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.796684112 podStartE2EDuration="4.796684112s" podCreationTimestamp="2025-12-09 17:20:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:22.792497043 +0000 UTC m=+1408.783607676" watchObservedRunningTime="2025-12-09 17:20:22.796684112 +0000 UTC m=+1408.787794745" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.027799 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.052717 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.206295 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.272764 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.272831 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.272857 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273054 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp7bh\" (UniqueName: \"kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273112 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273138 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273251 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data\") pod \"5a9c8223-82eb-43fd-b848-95010af7d16d\" (UID: \"5a9c8223-82eb-43fd-b848-95010af7d16d\") " Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273695 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.273995 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.274887 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.282170 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts" (OuterVolumeSpecName: "scripts") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.282708 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh" (OuterVolumeSpecName: "kube-api-access-qp7bh") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "kube-api-access-qp7bh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.356482 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.375770 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp7bh\" (UniqueName: \"kubernetes.io/projected/5a9c8223-82eb-43fd-b848-95010af7d16d-kube-api-access-qp7bh\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.375803 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.375812 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.375820 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a9c8223-82eb-43fd-b848-95010af7d16d-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.382179 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.434922 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data" (OuterVolumeSpecName: "config-data") pod "5a9c8223-82eb-43fd-b848-95010af7d16d" (UID: "5a9c8223-82eb-43fd-b848-95010af7d16d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.478022 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.478053 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a9c8223-82eb-43fd-b848-95010af7d16d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.764065 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerStarted","Data":"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1"} Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.768296 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.770944 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a9c8223-82eb-43fd-b848-95010af7d16d","Type":"ContainerDied","Data":"9421e0718479cd7ae9f4f50cdb9010f0d34441319e0005734f340a34965a50c2"} Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.771167 4840 scope.go:117] "RemoveContainer" containerID="bf76242ddfc463241c160b84973855eadb4f49e3200b6202aaad802a1fdfbc31" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.810869 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pl44l" podStartSLOduration=3.129610322 podStartE2EDuration="10.810848981s" podCreationTimestamp="2025-12-09 17:20:13 +0000 UTC" firstStartedPulling="2025-12-09 17:20:15.591213419 +0000 UTC m=+1401.582324052" lastFinishedPulling="2025-12-09 17:20:23.272452078 +0000 UTC m=+1409.263562711" observedRunningTime="2025-12-09 17:20:23.791727408 +0000 UTC m=+1409.782838051" watchObservedRunningTime="2025-12-09 17:20:23.810848981 +0000 UTC m=+1409.801959614" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.826089 4840 scope.go:117] "RemoveContainer" containerID="48540dacd1732366cf82cd600c22128adfc1dbd5bf924db8d85f35e18bb1007c" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.834088 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.846434 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.876764 4840 scope.go:117] "RemoveContainer" containerID="36a573364ef107e928e9057bef7b05b1f72daca22d81528fdece7a579d4f2eb3" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.913053 4840 scope.go:117] "RemoveContainer" containerID="52e72cfb19db1cc28121ed9046e7996ed89051ecb6e7e82a38bbe641c145cf2a" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.923347 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:23 crc kubenswrapper[4840]: E1209 17:20:23.923763 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-central-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.923780 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" 
containerName="ceilometer-central-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: E1209 17:20:23.923807 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="proxy-httpd" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.923813 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="proxy-httpd" Dec 09 17:20:23 crc kubenswrapper[4840]: E1209 17:20:23.923831 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="sg-core" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.923836 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="sg-core" Dec 09 17:20:23 crc kubenswrapper[4840]: E1209 17:20:23.923848 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-notification-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.923853 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-notification-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.924048 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-notification-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.924067 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="sg-core" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.924084 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="ceilometer-central-agent" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.924103 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" containerName="proxy-httpd" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.926082 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.928772 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.930446 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.930546 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:20:23 crc kubenswrapper[4840]: I1209 17:20:23.930463 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094006 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094535 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094577 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094653 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094678 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094721 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094840 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fkr2\" (UniqueName: \"kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.094900 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.170849 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.170900 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196743 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fkr2\" (UniqueName: \"kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196795 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196850 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196935 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196950 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.196991 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.197009 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.197364 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.197386 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.197684 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.201766 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.202450 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.203472 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.207261 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.213999 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.214677 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fkr2\" (UniqueName: \"kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2\") pod \"ceilometer-0\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.256874 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.618885 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a9c8223-82eb-43fd-b848-95010af7d16d" path="/var/lib/kubelet/pods/5a9c8223-82eb-43fd-b848-95010af7d16d/volumes" Dec 09 17:20:24 crc kubenswrapper[4840]: W1209 17:20:24.729081 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d8f406a_a65c_41f9_a8cc_21be5af1dcaa.slice/crio-0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357 WatchSource:0}: Error finding container 0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357: Status 404 returned error can't find the container with id 0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357 Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.737036 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:24 crc kubenswrapper[4840]: I1209 17:20:24.782006 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerStarted","Data":"0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357"} Dec 09 17:20:25 crc kubenswrapper[4840]: I1209 17:20:25.224580 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pl44l" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="registry-server" probeResult="failure" output=< Dec 09 17:20:25 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:20:25 crc kubenswrapper[4840]: > Dec 09 17:20:26 crc kubenswrapper[4840]: I1209 17:20:26.809723 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerStarted","Data":"0dfe49a3f033bbd56658f0536a17ef9d93e2171beee7610921148bac2ea487bf"} Dec 09 17:20:27 crc kubenswrapper[4840]: I1209 17:20:27.820907 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerStarted","Data":"1beaf98a12ca68dd684dfb02ea642b47cb6d12878189ed764ce203ecc03d1030"} Dec 09 17:20:28 crc kubenswrapper[4840]: I1209 17:20:28.052269 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 09 17:20:28 crc kubenswrapper[4840]: I1209 17:20:28.109461 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 09 17:20:28 crc kubenswrapper[4840]: I1209 17:20:28.832051 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerStarted","Data":"24f1dc5067538d010ada6bbf0fae7e0d639aa607d3e774a15a26246aea8b32e9"} Dec 09 17:20:28 crc kubenswrapper[4840]: I1209 17:20:28.859053 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 09 17:20:29 crc kubenswrapper[4840]: I1209 17:20:29.132382 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 17:20:29 crc kubenswrapper[4840]: I1209 17:20:29.132437 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 17:20:30 crc kubenswrapper[4840]: I1209 17:20:30.215152 4840 prober.go:107] "Probe failed" 
probeType="Startup" pod="openstack/nova-api-0" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 17:20:30 crc kubenswrapper[4840]: I1209 17:20:30.215223 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 17:20:31 crc kubenswrapper[4840]: I1209 17:20:31.268327 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 09 17:20:32 crc kubenswrapper[4840]: I1209 17:20:32.875367 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerStarted","Data":"6b0b4f3f887409d71bbeda9338169a446b1714a1645ef9f94bcd497a11633aa0"} Dec 09 17:20:32 crc kubenswrapper[4840]: I1209 17:20:32.876711 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:20:32 crc kubenswrapper[4840]: I1209 17:20:32.898856 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.430783272 podStartE2EDuration="9.898837554s" podCreationTimestamp="2025-12-09 17:20:23 +0000 UTC" firstStartedPulling="2025-12-09 17:20:24.731980899 +0000 UTC m=+1410.723091532" lastFinishedPulling="2025-12-09 17:20:32.200035171 +0000 UTC m=+1418.191145814" observedRunningTime="2025-12-09 17:20:32.895207361 +0000 UTC m=+1418.886318004" watchObservedRunningTime="2025-12-09 17:20:32.898837554 +0000 UTC m=+1418.889948187" Dec 09 17:20:35 crc kubenswrapper[4840]: I1209 17:20:35.232263 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pl44l" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="registry-server" probeResult="failure" output=< Dec 09 17:20:35 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:20:35 crc kubenswrapper[4840]: > Dec 09 17:20:35 crc kubenswrapper[4840]: I1209 17:20:35.963196 4840 generic.go:334] "Generic (PLEG): container finished" podID="97070aef-f135-4c60-9cae-21a0ae49e77c" containerID="dfac38e9bea7315daa1778d1b6ca204d863767c42a7eb63a6e83418654c03727" exitCode=137 Dec 09 17:20:35 crc kubenswrapper[4840]: I1209 17:20:35.963562 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97070aef-f135-4c60-9cae-21a0ae49e77c","Type":"ContainerDied","Data":"dfac38e9bea7315daa1778d1b6ca204d863767c42a7eb63a6e83418654c03727"} Dec 09 17:20:35 crc kubenswrapper[4840]: I1209 17:20:35.988238 4840 generic.go:334] "Generic (PLEG): container finished" podID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerID="f050a7733a656aa215535651b2bb521742bad606f66b2d5df288daed44e89b03" exitCode=137 Dec 09 17:20:35 crc kubenswrapper[4840]: I1209 17:20:35.988286 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerDied","Data":"f050a7733a656aa215535651b2bb521742bad606f66b2d5df288daed44e89b03"} Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.378183 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.384694 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543300 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data\") pod \"97070aef-f135-4c60-9cae-21a0ae49e77c\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543378 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle\") pod \"97070aef-f135-4c60-9cae-21a0ae49e77c\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543439 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs\") pod \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543479 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrwcr\" (UniqueName: \"kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr\") pod \"97070aef-f135-4c60-9cae-21a0ae49e77c\" (UID: \"97070aef-f135-4c60-9cae-21a0ae49e77c\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543566 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fc8p\" (UniqueName: \"kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p\") pod \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543634 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data\") pod \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.543653 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle\") pod \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\" (UID: \"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9\") " Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.545373 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs" (OuterVolumeSpecName: "logs") pod "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" (UID: "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.557928 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr" (OuterVolumeSpecName: "kube-api-access-qrwcr") pod "97070aef-f135-4c60-9cae-21a0ae49e77c" (UID: "97070aef-f135-4c60-9cae-21a0ae49e77c"). InnerVolumeSpecName "kube-api-access-qrwcr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.566916 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p" (OuterVolumeSpecName: "kube-api-access-9fc8p") pod "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" (UID: "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9"). InnerVolumeSpecName "kube-api-access-9fc8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.582452 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data" (OuterVolumeSpecName: "config-data") pod "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" (UID: "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.585600 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data" (OuterVolumeSpecName: "config-data") pod "97070aef-f135-4c60-9cae-21a0ae49e77c" (UID: "97070aef-f135-4c60-9cae-21a0ae49e77c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.594766 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97070aef-f135-4c60-9cae-21a0ae49e77c" (UID: "97070aef-f135-4c60-9cae-21a0ae49e77c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.602415 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" (UID: "08c91ed1-21ea-4eba-83f5-b3a7073ad8b9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646216 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fc8p\" (UniqueName: \"kubernetes.io/projected/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-kube-api-access-9fc8p\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646251 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646262 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646300 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646311 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97070aef-f135-4c60-9cae-21a0ae49e77c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646320 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:36 crc kubenswrapper[4840]: I1209 17:20:36.646328 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrwcr\" (UniqueName: \"kubernetes.io/projected/97070aef-f135-4c60-9cae-21a0ae49e77c-kube-api-access-qrwcr\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.001507 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08c91ed1-21ea-4eba-83f5-b3a7073ad8b9","Type":"ContainerDied","Data":"de4935ed724ff595bc8d7d52d7a74470b756f735390e1537cb48d86b40f66e1d"} Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.001593 4840 scope.go:117] "RemoveContainer" containerID="f050a7733a656aa215535651b2bb521742bad606f66b2d5df288daed44e89b03" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.001771 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.010042 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97070aef-f135-4c60-9cae-21a0ae49e77c","Type":"ContainerDied","Data":"52f73516de704e4cc7e4c23d64d586602597855d17add269d61d5204f86ae965"} Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.010139 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.046919 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.052222 4840 scope.go:117] "RemoveContainer" containerID="60a5bc46dd93e8d35dcb489a115f6532bd932b5c75a23866ba58cdd480a6b648" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.065035 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.090044 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.101128 4840 scope.go:117] "RemoveContainer" containerID="dfac38e9bea7315daa1778d1b6ca204d863767c42a7eb63a6e83418654c03727" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.132029 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.159435 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: E1209 17:20:37.159998 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97070aef-f135-4c60-9cae-21a0ae49e77c" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160031 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="97070aef-f135-4c60-9cae-21a0ae49e77c" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 17:20:37 crc kubenswrapper[4840]: E1209 17:20:37.160051 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-metadata" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160059 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-metadata" Dec 09 17:20:37 crc kubenswrapper[4840]: E1209 17:20:37.160084 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-log" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160091 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-log" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160343 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-metadata" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160368 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" containerName="nova-metadata-log" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.160385 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="97070aef-f135-4c60-9cae-21a0ae49e77c" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.161187 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.164492 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.164897 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.165192 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.171782 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.174353 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.178391 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.178583 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.179810 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.215578 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267133 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267472 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267575 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267652 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267811 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " 
pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.267901 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7n7p\" (UniqueName: \"kubernetes.io/projected/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-kube-api-access-c7n7p\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.268021 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.268129 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.268219 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.268301 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cb9q\" (UniqueName: \"kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.370957 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371062 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7n7p\" (UniqueName: \"kubernetes.io/projected/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-kube-api-access-c7n7p\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371136 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371216 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc 
kubenswrapper[4840]: I1209 17:20:37.371238 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371260 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cb9q\" (UniqueName: \"kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371304 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371335 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371372 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.371389 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.376467 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.377780 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.378680 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.379539 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.379826 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.381398 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.382546 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.383068 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.391950 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cb9q\" (UniqueName: \"kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q\") pod \"nova-metadata-0\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") " pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.395018 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7n7p\" (UniqueName: \"kubernetes.io/projected/ae9c08f5-b74f-4a78-9f61-425b25ef2f35-kube-api-access-c7n7p\") pod \"nova-cell1-novncproxy-0\" (UID: \"ae9c08f5-b74f-4a78-9f61-425b25ef2f35\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.508909 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.521948 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 17:20:37 crc kubenswrapper[4840]: I1209 17:20:37.984558 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 17:20:37 crc kubenswrapper[4840]: W1209 17:20:37.985041 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae9c08f5_b74f_4a78_9f61_425b25ef2f35.slice/crio-49db9d0f0671fc8eff8a997e1138288cbd9cc7f9cbc1cb43c2f828c1a5f7ff96 WatchSource:0}: Error finding container 49db9d0f0671fc8eff8a997e1138288cbd9cc7f9cbc1cb43c2f828c1a5f7ff96: Status 404 returned error can't find the container with id 49db9d0f0671fc8eff8a997e1138288cbd9cc7f9cbc1cb43c2f828c1a5f7ff96 Dec 09 17:20:38 crc kubenswrapper[4840]: I1209 17:20:38.027424 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ae9c08f5-b74f-4a78-9f61-425b25ef2f35","Type":"ContainerStarted","Data":"49db9d0f0671fc8eff8a997e1138288cbd9cc7f9cbc1cb43c2f828c1a5f7ff96"} Dec 09 17:20:38 crc kubenswrapper[4840]: I1209 17:20:38.087306 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:38 crc kubenswrapper[4840]: W1209 17:20:38.090688 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7747dca1_c42d_4119_a1a4_1acb40c8dd41.slice/crio-dddd954cd6a2b8d27683caeef9e3852cc355284d1f25880b1ff5d85682b7adc1 WatchSource:0}: Error finding container dddd954cd6a2b8d27683caeef9e3852cc355284d1f25880b1ff5d85682b7adc1: Status 404 returned error can't find the container with id dddd954cd6a2b8d27683caeef9e3852cc355284d1f25880b1ff5d85682b7adc1 Dec 09 17:20:38 crc kubenswrapper[4840]: I1209 17:20:38.625515 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c91ed1-21ea-4eba-83f5-b3a7073ad8b9" path="/var/lib/kubelet/pods/08c91ed1-21ea-4eba-83f5-b3a7073ad8b9/volumes" Dec 09 17:20:38 crc kubenswrapper[4840]: I1209 17:20:38.627699 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97070aef-f135-4c60-9cae-21a0ae49e77c" path="/var/lib/kubelet/pods/97070aef-f135-4c60-9cae-21a0ae49e77c/volumes" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.038047 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerStarted","Data":"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"} Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.038119 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerStarted","Data":"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"} Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.038134 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerStarted","Data":"dddd954cd6a2b8d27683caeef9e3852cc355284d1f25880b1ff5d85682b7adc1"} Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.042585 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ae9c08f5-b74f-4a78-9f61-425b25ef2f35","Type":"ContainerStarted","Data":"ad7391dfb23f5a10c14ea7983fe5234afb5eeec4e1023a4b4ff4b5ff13be38a0"} Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.063151 4840 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.063126252 podStartE2EDuration="2.063126252s" podCreationTimestamp="2025-12-09 17:20:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:39.056396091 +0000 UTC m=+1425.047506734" watchObservedRunningTime="2025-12-09 17:20:39.063126252 +0000 UTC m=+1425.054236915" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.081907 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.081887934 podStartE2EDuration="2.081887934s" podCreationTimestamp="2025-12-09 17:20:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:39.073526097 +0000 UTC m=+1425.064636740" watchObservedRunningTime="2025-12-09 17:20:39.081887934 +0000 UTC m=+1425.072998567" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.136074 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.136773 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.137288 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 09 17:20:39 crc kubenswrapper[4840]: I1209 17:20:39.140655 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.059197 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.071676 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.357009 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.359755 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.383348 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.446687 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.446764 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.446880 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.446909 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st6ms\" (UniqueName: \"kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.447019 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.447080 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.548916 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.549197 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st6ms\" (UniqueName: \"kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.549394 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.549552 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.549695 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.549819 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.550170 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.550416 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.550428 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.550622 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.550634 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.574291 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st6ms\" (UniqueName: 
\"kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms\") pod \"dnsmasq-dns-78468d7767-nkm2f\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:40 crc kubenswrapper[4840]: I1209 17:20:40.682838 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:41 crc kubenswrapper[4840]: I1209 17:20:41.188638 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:20:41 crc kubenswrapper[4840]: W1209 17:20:41.191766 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod837fb506_c332_4b96_bb0a_95a008bc2016.slice/crio-b30e12b79dda4a77e2432c7fee3747461e335d77a63a1979e06f88343e7ae437 WatchSource:0}: Error finding container b30e12b79dda4a77e2432c7fee3747461e335d77a63a1979e06f88343e7ae437: Status 404 returned error can't find the container with id b30e12b79dda4a77e2432c7fee3747461e335d77a63a1979e06f88343e7ae437 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.075285 4840 generic.go:334] "Generic (PLEG): container finished" podID="837fb506-c332-4b96-bb0a-95a008bc2016" containerID="786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c" exitCode=0 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.075352 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" event={"ID":"837fb506-c332-4b96-bb0a-95a008bc2016","Type":"ContainerDied","Data":"786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c"} Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.075680 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" event={"ID":"837fb506-c332-4b96-bb0a-95a008bc2016","Type":"ContainerStarted","Data":"b30e12b79dda4a77e2432c7fee3747461e335d77a63a1979e06f88343e7ae437"} Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.509244 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.523184 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.523237 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.839218 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.839498 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-central-agent" containerID="cri-o://0dfe49a3f033bbd56658f0536a17ef9d93e2171beee7610921148bac2ea487bf" gracePeriod=30 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.839709 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="sg-core" containerID="cri-o://24f1dc5067538d010ada6bbf0fae7e0d639aa607d3e774a15a26246aea8b32e9" gracePeriod=30 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.839686 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" 
containerName="proxy-httpd" containerID="cri-o://6b0b4f3f887409d71bbeda9338169a446b1714a1645ef9f94bcd497a11633aa0" gracePeriod=30 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.839772 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-notification-agent" containerID="cri-o://1beaf98a12ca68dd684dfb02ea642b47cb6d12878189ed764ce203ecc03d1030" gracePeriod=30 Dec 09 17:20:42 crc kubenswrapper[4840]: I1209 17:20:42.849362 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.218:3000/\": EOF" Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.091254 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" event={"ID":"837fb506-c332-4b96-bb0a-95a008bc2016","Type":"ContainerStarted","Data":"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221"} Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.091811 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.094475 4840 generic.go:334] "Generic (PLEG): container finished" podID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerID="6b0b4f3f887409d71bbeda9338169a446b1714a1645ef9f94bcd497a11633aa0" exitCode=0 Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.094498 4840 generic.go:334] "Generic (PLEG): container finished" podID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerID="24f1dc5067538d010ada6bbf0fae7e0d639aa607d3e774a15a26246aea8b32e9" exitCode=2 Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.094518 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerDied","Data":"6b0b4f3f887409d71bbeda9338169a446b1714a1645ef9f94bcd497a11633aa0"} Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.094540 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerDied","Data":"24f1dc5067538d010ada6bbf0fae7e0d639aa607d3e774a15a26246aea8b32e9"} Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.117477 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" podStartSLOduration=3.117453509 podStartE2EDuration="3.117453509s" podCreationTimestamp="2025-12-09 17:20:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:43.108199486 +0000 UTC m=+1429.099310129" watchObservedRunningTime="2025-12-09 17:20:43.117453509 +0000 UTC m=+1429.108564142" Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.350872 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.351112 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-log" containerID="cri-o://c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33" gracePeriod=30 Dec 09 17:20:43 crc kubenswrapper[4840]: I1209 17:20:43.351252 4840 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/nova-api-0" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-api" containerID="cri-o://ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df" gracePeriod=30 Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.107099 4840 generic.go:334] "Generic (PLEG): container finished" podID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerID="c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33" exitCode=143 Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.107186 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerDied","Data":"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33"} Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.110757 4840 generic.go:334] "Generic (PLEG): container finished" podID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerID="0dfe49a3f033bbd56658f0536a17ef9d93e2171beee7610921148bac2ea487bf" exitCode=0 Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.110830 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerDied","Data":"0dfe49a3f033bbd56658f0536a17ef9d93e2171beee7610921148bac2ea487bf"} Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.221021 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.286297 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:44 crc kubenswrapper[4840]: I1209 17:20:44.462307 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.122647 4840 generic.go:334] "Generic (PLEG): container finished" podID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerID="1beaf98a12ca68dd684dfb02ea642b47cb6d12878189ed764ce203ecc03d1030" exitCode=0 Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.122731 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerDied","Data":"1beaf98a12ca68dd684dfb02ea642b47cb6d12878189ed764ce203ecc03d1030"} Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.122957 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa","Type":"ContainerDied","Data":"0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357"} Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.122999 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ddc478c2f1921274cd4e15dc10848ae430ec4cace528ca4545b6d751a60a357" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.138854 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.241328 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.241391 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.241443 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fkr2\" (UniqueName: \"kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.241601 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242052 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242076 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242104 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242134 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd\") pod \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\" (UID: \"1d8f406a-a65c-41f9-a8cc-21be5af1dcaa\") " Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242131 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.242944 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.243761 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.243777 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.259658 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts" (OuterVolumeSpecName: "scripts") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.276160 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2" (OuterVolumeSpecName: "kube-api-access-5fkr2") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "kube-api-access-5fkr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.338181 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.353004 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.353034 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.353045 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fkr2\" (UniqueName: \"kubernetes.io/projected/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-kube-api-access-5fkr2\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.371310 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.389288 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.440577 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data" (OuterVolumeSpecName: "config-data") pod "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" (UID: "1d8f406a-a65c-41f9-a8cc-21be5af1dcaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.455069 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.455096 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:45 crc kubenswrapper[4840]: I1209 17:20:45.455114 4840 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.133066 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.133157 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pl44l" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="registry-server" containerID="cri-o://f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1" gracePeriod=2 Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.177531 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.201810 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.221823 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:46 crc kubenswrapper[4840]: E1209 17:20:46.224034 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="proxy-httpd" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224065 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="proxy-httpd" Dec 09 17:20:46 crc kubenswrapper[4840]: E1209 17:20:46.224089 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="sg-core" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224115 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="sg-core" Dec 09 17:20:46 crc kubenswrapper[4840]: E1209 17:20:46.224134 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-notification-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224143 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-notification-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: E1209 17:20:46.224163 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-central-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224183 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-central-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224458 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-notification-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224494 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="sg-core" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224510 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="ceilometer-central-agent" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.224530 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" containerName="proxy-httpd" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.235104 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.239604 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.241265 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.244306 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.244430 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385256 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385681 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385721 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385751 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385785 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385821 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385858 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.385899 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-962n2\" (UniqueName: 
\"kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489338 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489404 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489428 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489471 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489494 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489516 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.489546 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-962n2\" (UniqueName: \"kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.490257 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.490470 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.500683 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.504317 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.504633 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.507674 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.508323 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.524012 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-962n2\" (UniqueName: \"kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2\") pod \"ceilometer-0\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.626432 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d8f406a-a65c-41f9-a8cc-21be5af1dcaa" path="/var/lib/kubelet/pods/1d8f406a-a65c-41f9-a8cc-21be5af1dcaa/volumes" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.644589 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.828499 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.898153 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfvb5\" (UniqueName: \"kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5\") pod \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.898416 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content\") pod \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.898495 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities\") pod \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\" (UID: \"95c5128d-5180-4388-a4fa-c7252e8ccdc3\") " Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.901283 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities" (OuterVolumeSpecName: "utilities") pod "95c5128d-5180-4388-a4fa-c7252e8ccdc3" (UID: "95c5128d-5180-4388-a4fa-c7252e8ccdc3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:46 crc kubenswrapper[4840]: I1209 17:20:46.907132 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5" (OuterVolumeSpecName: "kube-api-access-kfvb5") pod "95c5128d-5180-4388-a4fa-c7252e8ccdc3" (UID: "95c5128d-5180-4388-a4fa-c7252e8ccdc3"). InnerVolumeSpecName "kube-api-access-kfvb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.000920 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfvb5\" (UniqueName: \"kubernetes.io/projected/95c5128d-5180-4388-a4fa-c7252e8ccdc3-kube-api-access-kfvb5\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.000947 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.018335 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95c5128d-5180-4388-a4fa-c7252e8ccdc3" (UID: "95c5128d-5180-4388-a4fa-c7252e8ccdc3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.043165 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.101872 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data\") pod \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.102020 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75n6r\" (UniqueName: \"kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r\") pod \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.102154 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs\") pod \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.102293 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle\") pod \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\" (UID: \"76a53983-e1bf-48e4-9b46-4028bc79a9c9\") " Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.102743 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c5128d-5180-4388-a4fa-c7252e8ccdc3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.102854 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs" (OuterVolumeSpecName: "logs") pod "76a53983-e1bf-48e4-9b46-4028bc79a9c9" (UID: "76a53983-e1bf-48e4-9b46-4028bc79a9c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.109090 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r" (OuterVolumeSpecName: "kube-api-access-75n6r") pod "76a53983-e1bf-48e4-9b46-4028bc79a9c9" (UID: "76a53983-e1bf-48e4-9b46-4028bc79a9c9"). InnerVolumeSpecName "kube-api-access-75n6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.160723 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76a53983-e1bf-48e4-9b46-4028bc79a9c9" (UID: "76a53983-e1bf-48e4-9b46-4028bc79a9c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.164344 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data" (OuterVolumeSpecName: "config-data") pod "76a53983-e1bf-48e4-9b46-4028bc79a9c9" (UID: "76a53983-e1bf-48e4-9b46-4028bc79a9c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.168322 4840 generic.go:334] "Generic (PLEG): container finished" podID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerID="ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df" exitCode=0 Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.168438 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.169092 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerDied","Data":"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df"} Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.169152 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"76a53983-e1bf-48e4-9b46-4028bc79a9c9","Type":"ContainerDied","Data":"f0adf097bf5f520207a7cbfa51ccb01da443f332b5438e24026be8e2b0122890"} Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.169174 4840 scope.go:117] "RemoveContainer" containerID="ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.179491 4840 generic.go:334] "Generic (PLEG): container finished" podID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerID="f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1" exitCode=0 Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.179564 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerDied","Data":"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1"} Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.179699 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pl44l" event={"ID":"95c5128d-5180-4388-a4fa-c7252e8ccdc3","Type":"ContainerDied","Data":"0b3a1148e2ccd629f3e51dc7116d1478c086851898327d8b47b79888bb65f337"} Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.179614 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pl44l" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.206008 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.206033 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a53983-e1bf-48e4-9b46-4028bc79a9c9-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.206044 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75n6r\" (UniqueName: \"kubernetes.io/projected/76a53983-e1bf-48e4-9b46-4028bc79a9c9-kube-api-access-75n6r\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.206054 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/76a53983-e1bf-48e4-9b46-4028bc79a9c9-logs\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.206457 4840 scope.go:117] "RemoveContainer" containerID="c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.217113 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.232353 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.263261 4840 scope.go:117] "RemoveContainer" containerID="ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.264314 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df\": container with ID starting with ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df not found: ID does not exist" containerID="ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.264352 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df"} err="failed to get container status \"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df\": rpc error: code = NotFound desc = could not find container \"ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df\": container with ID starting with ac60d1bb71b7f586f7ee45b3a7e07398cb2e81d4f2c2c7bfa92640870c1a12df not found: ID does not exist" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.264381 4840 scope.go:117] "RemoveContainer" containerID="c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.267064 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33\": container with ID starting with c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33 not found: ID does not exist" containerID="c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.267108 
4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33"} err="failed to get container status \"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33\": rpc error: code = NotFound desc = could not find container \"c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33\": container with ID starting with c338b70ea0a0b9868cbaff11da891b41e2ac8f9100363a0a891886f481b02a33 not found: ID does not exist" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.267135 4840 scope.go:117] "RemoveContainer" containerID="f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.269138 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289026 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.289532 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-api" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289555 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-api" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.289577 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-log" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289585 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-log" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.289610 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="extract-utilities" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289619 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="extract-utilities" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.289630 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="extract-content" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289639 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="extract-content" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.289671 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="registry-server" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289678 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" containerName="registry-server" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289905 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-log" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289929 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" containerName="nova-api-api" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.289945 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" 
containerName="registry-server" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.291101 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.294907 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.295269 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.295381 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.297895 4840 scope.go:117] "RemoveContainer" containerID="cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.300677 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.320553 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.338755 4840 scope.go:117] "RemoveContainer" containerID="9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.339095 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pl44l"] Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.411175 4840 scope.go:117] "RemoveContainer" containerID="f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414688 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414751 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414822 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsdk2\" (UniqueName: \"kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414897 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414929 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " 
pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.414952 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.418492 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1\": container with ID starting with f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1 not found: ID does not exist" containerID="f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.418531 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1"} err="failed to get container status \"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1\": rpc error: code = NotFound desc = could not find container \"f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1\": container with ID starting with f60fa1dbc26cd5e0d401e23ac1357f716c04a713ca4f6c0735ffe784adceb3a1 not found: ID does not exist" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.418558 4840 scope.go:117] "RemoveContainer" containerID="cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.419213 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3\": container with ID starting with cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3 not found: ID does not exist" containerID="cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.419234 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3"} err="failed to get container status \"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3\": rpc error: code = NotFound desc = could not find container \"cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3\": container with ID starting with cd26bd57034528288baad4fb300ba1bb5c4e0a27a66b10345a5c5e3360fd7dd3 not found: ID does not exist" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.419249 4840 scope.go:117] "RemoveContainer" containerID="9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314" Dec 09 17:20:47 crc kubenswrapper[4840]: E1209 17:20:47.419638 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314\": container with ID starting with 9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314 not found: ID does not exist" containerID="9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.419658 4840 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314"} err="failed to get container status \"9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314\": rpc error: code = NotFound desc = could not find container \"9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314\": container with ID starting with 9340a8d9e934adc2f143679e2fac9914f6b9f33f80658765eec88469f9133314 not found: ID does not exist" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.509941 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517322 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsdk2\" (UniqueName: \"kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517510 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517582 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517629 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517696 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.517758 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.518835 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.522529 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.522541 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.522542 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.522670 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.522700 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.534654 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.538327 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.539936 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsdk2\" (UniqueName: \"kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2\") pod \"nova-api-0\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") " pod="openstack/nova-api-0" Dec 09 17:20:47 crc kubenswrapper[4840]: I1209 17:20:47.634522 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.210129 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.215729 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerStarted","Data":"29fcf9174b02fa0ffe803a052c98071735569ee2018137928f64c8b80e88c9a2"} Dec 09 17:20:48 crc kubenswrapper[4840]: W1209 17:20:48.231065 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf26580f_1c4f_43a0_88a4_906080ccec88.slice/crio-ccf402e30bf81bcfed9849af9b37195235673941226405891991b8380328f6c0 WatchSource:0}: Error finding container ccf402e30bf81bcfed9849af9b37195235673941226405891991b8380328f6c0: Status 404 returned error can't find the container with id ccf402e30bf81bcfed9849af9b37195235673941226405891991b8380328f6c0 Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.242248 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.538150 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.538178 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.650756 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76a53983-e1bf-48e4-9b46-4028bc79a9c9" path="/var/lib/kubelet/pods/76a53983-e1bf-48e4-9b46-4028bc79a9c9/volumes" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.651505 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95c5128d-5180-4388-a4fa-c7252e8ccdc3" path="/var/lib/kubelet/pods/95c5128d-5180-4388-a4fa-c7252e8ccdc3/volumes" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.675947 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-9mn25"] Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.677246 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-9mn25"] Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.677382 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.679560 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.679817 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.717421 4840 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podc99bfc51-b110-4fca-a9eb-cf517044c149"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podc99bfc51-b110-4fca-a9eb-cf517044c149] : Timed out while waiting for systemd to remove kubepods-besteffort-podc99bfc51_b110_4fca_a9eb_cf517044c149.slice" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.863949 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.864011 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvvlp\" (UniqueName: \"kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.864206 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.864268 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.966734 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.966802 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.966901 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: 
\"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.966922 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvvlp\" (UniqueName: \"kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.973603 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.973792 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.980908 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:48 crc kubenswrapper[4840]: I1209 17:20:48.987950 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvvlp\" (UniqueName: \"kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp\") pod \"nova-cell1-cell-mapping-9mn25\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.056250 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.259294 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerStarted","Data":"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"} Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.259646 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerStarted","Data":"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"} Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.259687 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerStarted","Data":"ccf402e30bf81bcfed9849af9b37195235673941226405891991b8380328f6c0"} Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.267245 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerStarted","Data":"13ccc1f3645c9639e4ba8a87dd921b4fe2eed50843d8ca23abc6a18e97b38856"} Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.312700 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.312680856 podStartE2EDuration="2.312680856s" podCreationTimestamp="2025-12-09 17:20:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:49.30436006 +0000 UTC m=+1435.295470683" watchObservedRunningTime="2025-12-09 17:20:49.312680856 +0000 UTC m=+1435.303791489" Dec 09 17:20:49 crc kubenswrapper[4840]: I1209 17:20:49.687914 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-9mn25"] Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.278311 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerStarted","Data":"b57a4e71deff11db39b0030e0c0a19346b3e47b90439f31d13b7978def190e22"} Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.280842 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9mn25" event={"ID":"0a569cc0-6ebe-431c-af1b-3c7560dc2954","Type":"ContainerStarted","Data":"17f6ef87e93d07684ffe1c68df19721aa9f2b1aaa68647880ad37249a297e4b4"} Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.280883 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9mn25" event={"ID":"0a569cc0-6ebe-431c-af1b-3c7560dc2954","Type":"ContainerStarted","Data":"fba6fa5043ea970c5bd5c82afc64fe6ab48331ab9866beef40e18c48ae13c210"} Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.313599 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-9mn25" podStartSLOduration=2.313580147 podStartE2EDuration="2.313580147s" podCreationTimestamp="2025-12-09 17:20:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:20:50.296237795 +0000 UTC m=+1436.287348438" watchObservedRunningTime="2025-12-09 17:20:50.313580147 +0000 UTC m=+1436.304690780" Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.684122 4840 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.827834 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.828142 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="dnsmasq-dns" containerID="cri-o://b893b05e04e921106f02e79732ef58560a046576a6c901371ed7ef88cfafa644" gracePeriod=10 Dec 09 17:20:50 crc kubenswrapper[4840]: I1209 17:20:50.836057 4840 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod13330488-1e93-4a88-8f15-331ee0b935cf"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod13330488-1e93-4a88-8f15-331ee0b935cf] : Timed out while waiting for systemd to remove kubepods-besteffort-pod13330488_1e93_4a88_8f15_331ee0b935cf.slice" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.293594 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerStarted","Data":"0c034f4301f6bfd4f1991026239acbc2d40aa12505a6b947e1bbefa7045b6bba"} Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.297950 4840 generic.go:334] "Generic (PLEG): container finished" podID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerID="b893b05e04e921106f02e79732ef58560a046576a6c901371ed7ef88cfafa644" exitCode=0 Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.298181 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" event={"ID":"6782d20a-7636-4868-a203-ef8fbf37a6c9","Type":"ContainerDied","Data":"b893b05e04e921106f02e79732ef58560a046576a6c901371ed7ef88cfafa644"} Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.590086 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.678898 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.679205 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6skb7\" (UniqueName: \"kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.679328 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.679449 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.679555 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.679671 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc\") pod \"6782d20a-7636-4868-a203-ef8fbf37a6c9\" (UID: \"6782d20a-7636-4868-a203-ef8fbf37a6c9\") " Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.686271 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7" (OuterVolumeSpecName: "kube-api-access-6skb7") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "kube-api-access-6skb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.782472 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6skb7\" (UniqueName: \"kubernetes.io/projected/6782d20a-7636-4868-a203-ef8fbf37a6c9-kube-api-access-6skb7\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.831685 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.843467 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.863417 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.863448 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config" (OuterVolumeSpecName: "config") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.880202 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6782d20a-7636-4868-a203-ef8fbf37a6c9" (UID: "6782d20a-7636-4868-a203-ef8fbf37a6c9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.884652 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.884800 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.884855 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.884910 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:51 crc kubenswrapper[4840]: I1209 17:20:51.885000 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6782d20a-7636-4868-a203-ef8fbf37a6c9-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.318130 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.319238 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-tvj7t" event={"ID":"6782d20a-7636-4868-a203-ef8fbf37a6c9","Type":"ContainerDied","Data":"d9982f77d139e4b9b4d33c79f68abe665bd83303dafa3e86656fda9ae42e2d64"} Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.319281 4840 scope.go:117] "RemoveContainer" containerID="b893b05e04e921106f02e79732ef58560a046576a6c901371ed7ef88cfafa644" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.356732 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerStarted","Data":"ab53ad577133a2b2cdbb2393c02c3b76ace8404bcb1bbeb48dd37254b8821dbe"} Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.357568 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.360714 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.369858 4840 scope.go:117] "RemoveContainer" containerID="c5c8e5335c2415386f0615edf91188ab581f383408e7b1f25904ba2b64799181" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.372581 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-tvj7t"] Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.386050 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.215465891 podStartE2EDuration="6.386032625s" podCreationTimestamp="2025-12-09 17:20:46 +0000 UTC" firstStartedPulling="2025-12-09 17:20:47.216950417 +0000 UTC m=+1433.208061050" lastFinishedPulling="2025-12-09 17:20:51.387517151 +0000 UTC m=+1437.378627784" observedRunningTime="2025-12-09 17:20:52.381089325 +0000 UTC m=+1438.372199958" watchObservedRunningTime="2025-12-09 17:20:52.386032625 +0000 UTC m=+1438.377143258" Dec 09 17:20:52 crc kubenswrapper[4840]: I1209 17:20:52.619057 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" path="/var/lib/kubelet/pods/6782d20a-7636-4868-a203-ef8fbf37a6c9/volumes" Dec 09 17:20:56 crc kubenswrapper[4840]: I1209 17:20:56.402992 4840 generic.go:334] "Generic (PLEG): container finished" podID="0a569cc0-6ebe-431c-af1b-3c7560dc2954" containerID="17f6ef87e93d07684ffe1c68df19721aa9f2b1aaa68647880ad37249a297e4b4" exitCode=0 Dec 09 17:20:56 crc kubenswrapper[4840]: I1209 17:20:56.403071 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9mn25" event={"ID":"0a569cc0-6ebe-431c-af1b-3c7560dc2954","Type":"ContainerDied","Data":"17f6ef87e93d07684ffe1c68df19721aa9f2b1aaa68647880ad37249a297e4b4"} Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.529336 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.531624 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.535898 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.635375 4840 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.636371 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 17:20:57 crc kubenswrapper[4840]: I1209 17:20:57.881734 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.009273 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvvlp\" (UniqueName: \"kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp\") pod \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.009365 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle\") pod \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.009455 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts\") pod \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.009537 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data\") pod \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\" (UID: \"0a569cc0-6ebe-431c-af1b-3c7560dc2954\") " Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.017159 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts" (OuterVolumeSpecName: "scripts") pod "0a569cc0-6ebe-431c-af1b-3c7560dc2954" (UID: "0a569cc0-6ebe-431c-af1b-3c7560dc2954"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.017629 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp" (OuterVolumeSpecName: "kube-api-access-zvvlp") pod "0a569cc0-6ebe-431c-af1b-3c7560dc2954" (UID: "0a569cc0-6ebe-431c-af1b-3c7560dc2954"). InnerVolumeSpecName "kube-api-access-zvvlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.042203 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a569cc0-6ebe-431c-af1b-3c7560dc2954" (UID: "0a569cc0-6ebe-431c-af1b-3c7560dc2954"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.073986 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data" (OuterVolumeSpecName: "config-data") pod "0a569cc0-6ebe-431c-af1b-3c7560dc2954" (UID: "0a569cc0-6ebe-431c-af1b-3c7560dc2954"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.112016 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvvlp\" (UniqueName: \"kubernetes.io/projected/0a569cc0-6ebe-431c-af1b-3c7560dc2954-kube-api-access-zvvlp\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.112052 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.112064 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.112074 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a569cc0-6ebe-431c-af1b-3c7560dc2954-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.423084 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9mn25" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.423089 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9mn25" event={"ID":"0a569cc0-6ebe-431c-af1b-3c7560dc2954","Type":"ContainerDied","Data":"fba6fa5043ea970c5bd5c82afc64fe6ab48331ab9866beef40e18c48ae13c210"} Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.423541 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fba6fa5043ea970c5bd5c82afc64fe6ab48331ab9866beef40e18c48ae13c210" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.434379 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.606595 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.606851 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerName="nova-scheduler-scheduler" containerID="cri-o://b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" gracePeriod=30 Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.630374 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.640957 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.647224 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 17:20:58 crc kubenswrapper[4840]: I1209 17:20:58.647236 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 
17:20:59 crc kubenswrapper[4840]: I1209 17:20:59.455236 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-log" containerID="cri-o://773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b" gracePeriod=30 Dec 09 17:20:59 crc kubenswrapper[4840]: I1209 17:20:59.455485 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-api" containerID="cri-o://bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98" gracePeriod=30 Dec 09 17:21:00 crc kubenswrapper[4840]: I1209 17:21:00.467657 4840 generic.go:334] "Generic (PLEG): container finished" podID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerID="773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b" exitCode=143 Dec 09 17:21:00 crc kubenswrapper[4840]: I1209 17:21:00.467698 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerDied","Data":"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"} Dec 09 17:21:00 crc kubenswrapper[4840]: I1209 17:21:00.468483 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log" containerID="cri-o://f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6" gracePeriod=30 Dec 09 17:21:00 crc kubenswrapper[4840]: I1209 17:21:00.468529 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata" containerID="cri-o://18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8" gracePeriod=30 Dec 09 17:21:01 crc kubenswrapper[4840]: I1209 17:21:01.480814 4840 generic.go:334] "Generic (PLEG): container finished" podID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerID="f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6" exitCode=143 Dec 09 17:21:01 crc kubenswrapper[4840]: I1209 17:21:01.480901 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerDied","Data":"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"} Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.052299 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 is running failed: container process not found" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.053165 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 is running failed: container process not found" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.053452 4840 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: 
code = NotFound desc = container is not created or running: checking if PID of b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 is running failed: container process not found" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.053480 4840 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerName="nova-scheduler-scheduler" Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.418617 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.522755 4840 generic.go:334] "Generic (PLEG): container finished" podID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" exitCode=0 Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.522811 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8","Type":"ContainerDied","Data":"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54"} Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.522836 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.522867 4840 scope.go:117] "RemoveContainer" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54" Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.522851 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8","Type":"ContainerDied","Data":"eda4216ef8ad7aabb0d2abb88111da701af148eda8370800c86ea9e77bd32531"} Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.531457 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle\") pod \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.531682 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data\") pod \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.531762 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7wsb\" (UniqueName: \"kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb\") pod \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\" (UID: \"91a23973-2c0e-4ef7-b1ca-9290b7c42dc8\") " Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.537461 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb" (OuterVolumeSpecName: "kube-api-access-g7wsb") pod "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" 
(UID: "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8"). InnerVolumeSpecName "kube-api-access-g7wsb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.549480 4840 scope.go:117] "RemoveContainer" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54"
Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.549993 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54\": container with ID starting with b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 not found: ID does not exist" containerID="b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.550130 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54"} err="failed to get container status \"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54\": rpc error: code = NotFound desc = could not find container \"b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54\": container with ID starting with b826f4453e97ba2c793dacc31172367b47edd1bd1011451db4872a619c22da54 not found: ID does not exist"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.563861 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" (UID: "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.583044 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data" (OuterVolumeSpecName: "config-data") pod "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" (UID: "91a23973-2c0e-4ef7-b1ca-9290b7c42dc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.612515 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": read tcp 10.217.0.2:41044->10.217.0.220:8775: read: connection reset by peer"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.612549 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": read tcp 10.217.0.2:41036->10.217.0.220:8775: read: connection reset by peer"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.633908 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7wsb\" (UniqueName: \"kubernetes.io/projected/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-kube-api-access-g7wsb\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.633942 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.633952 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.913670 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.928325 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.944175 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.944728 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="init"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.944753 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="init"
Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.944803 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a569cc0-6ebe-431c-af1b-3c7560dc2954" containerName="nova-manage"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.944813 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a569cc0-6ebe-431c-af1b-3c7560dc2954" containerName="nova-manage"
Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.944836 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="dnsmasq-dns"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.944844 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="dnsmasq-dns"
Dec 09 17:21:03 crc kubenswrapper[4840]: E1209 17:21:03.944856 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerName="nova-scheduler-scheduler"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.944864 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerName="nova-scheduler-scheduler"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.945127 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="6782d20a-7636-4868-a203-ef8fbf37a6c9" containerName="dnsmasq-dns"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.945153 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" containerName="nova-scheduler-scheduler"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.945178 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a569cc0-6ebe-431c-af1b-3c7560dc2954" containerName="nova-manage"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.947619 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.957515 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Dec 09 17:21:03 crc kubenswrapper[4840]: I1209 17:21:03.976566 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.041171 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.041594 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-config-data\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.041645 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5nxp\" (UniqueName: \"kubernetes.io/projected/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-kube-api-access-x5nxp\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.143828 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.143927 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-config-data\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.143988 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5nxp\" (UniqueName: \"kubernetes.io/projected/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-kube-api-access-x5nxp\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.153893 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-config-data\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.153924 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.174269 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5nxp\" (UniqueName: \"kubernetes.io/projected/e8d64fd3-fec5-4d6c-b007-d481268bfe1b-kube-api-access-x5nxp\") pod \"nova-scheduler-0\" (UID: \"e8d64fd3-fec5-4d6c-b007-d481268bfe1b\") " pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.224185 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.329432 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.347886 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs\") pod \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.347957 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data\") pod \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.348040 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cb9q\" (UniqueName: \"kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q\") pod \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.348137 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle\") pod \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.348310 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs\") pod \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\" (UID: \"7747dca1-c42d-4119-a1a4-1acb40c8dd41\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.349199 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs" (OuterVolumeSpecName: "logs") pod "7747dca1-c42d-4119-a1a4-1acb40c8dd41" (UID: "7747dca1-c42d-4119-a1a4-1acb40c8dd41"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.352156 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q" (OuterVolumeSpecName: "kube-api-access-7cb9q") pod "7747dca1-c42d-4119-a1a4-1acb40c8dd41" (UID: "7747dca1-c42d-4119-a1a4-1acb40c8dd41"). InnerVolumeSpecName "kube-api-access-7cb9q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.372976 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.395470 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data" (OuterVolumeSpecName: "config-data") pod "7747dca1-c42d-4119-a1a4-1acb40c8dd41" (UID: "7747dca1-c42d-4119-a1a4-1acb40c8dd41"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.400853 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7747dca1-c42d-4119-a1a4-1acb40c8dd41" (UID: "7747dca1-c42d-4119-a1a4-1acb40c8dd41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453116 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "7747dca1-c42d-4119-a1a4-1acb40c8dd41" (UID: "7747dca1-c42d-4119-a1a4-1acb40c8dd41"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453501 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453650 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453692 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453782 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453804 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsdk2\" (UniqueName: \"kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.453856 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs\") pod \"bf26580f-1c4f-43a0-88a4-906080ccec88\" (UID: \"bf26580f-1c4f-43a0-88a4-906080ccec88\") "
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.469446 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs" (OuterVolumeSpecName: "logs") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.470085 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2" (OuterVolumeSpecName: "kube-api-access-nsdk2") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "kube-api-access-nsdk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.470792 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.470928 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf26580f-1c4f-43a0-88a4-906080ccec88-logs\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.471014 4840 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7747dca1-c42d-4119-a1a4-1acb40c8dd41-logs\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.471072 4840 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.471138 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7747dca1-c42d-4119-a1a4-1acb40c8dd41-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.471200 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cb9q\" (UniqueName: \"kubernetes.io/projected/7747dca1-c42d-4119-a1a4-1acb40c8dd41-kube-api-access-7cb9q\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.471252 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsdk2\" (UniqueName: \"kubernetes.io/projected/bf26580f-1c4f-43a0-88a4-906080ccec88-kube-api-access-nsdk2\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.502379 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data" (OuterVolumeSpecName: "config-data") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.504903 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.516165 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.535322 4840 generic.go:334] "Generic (PLEG): container finished" podID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerID="bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98" exitCode=0
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.535376 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerDied","Data":"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"}
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.535419 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf26580f-1c4f-43a0-88a4-906080ccec88","Type":"ContainerDied","Data":"ccf402e30bf81bcfed9849af9b37195235673941226405891991b8380328f6c0"}
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.535436 4840 scope.go:117] "RemoveContainer" containerID="bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.535566 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.538570 4840 generic.go:334] "Generic (PLEG): container finished" podID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerID="18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8" exitCode=0
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.538609 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerDied","Data":"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"}
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.538625 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7747dca1-c42d-4119-a1a4-1acb40c8dd41","Type":"ContainerDied","Data":"dddd954cd6a2b8d27683caeef9e3852cc355284d1f25880b1ff5d85682b7adc1"}
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.538680 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.553110 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bf26580f-1c4f-43a0-88a4-906080ccec88" (UID: "bf26580f-1c4f-43a0-88a4-906080ccec88"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.564093 4840 scope.go:117] "RemoveContainer" containerID="773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.572588 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.572611 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.572621 4840 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.572633 4840 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf26580f-1c4f-43a0-88a4-906080ccec88-public-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.591590 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.605105 4840 scope.go:117] "RemoveContainer" containerID="bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.606391 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.611588 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98\": container with ID starting with bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98 not found: ID does not exist" containerID="bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.611626 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98"} err="failed to get container status \"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98\": rpc error: code = NotFound desc = could not find container \"bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98\": container with ID starting with bf37eab4513191a9052eb596025a5fdaaf7bf5571de38aa7279307f9dd564a98 not found: ID does not exist"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.611647 4840 scope.go:117] "RemoveContainer" containerID="773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.612848 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b\": container with ID starting with 773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b not found: ID does not exist" containerID="773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.612899 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b"} err="failed to get container status \"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b\": rpc error: code = NotFound desc = could not find container \"773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b\": container with ID starting with 773aaafa76e831cbf47dc222229284ba19e3915dbd75304a99459f8370a7e01b not found: ID does not exist"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.612927 4840 scope.go:117] "RemoveContainer" containerID="18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.635299 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" path="/var/lib/kubelet/pods/7747dca1-c42d-4119-a1a4-1acb40c8dd41/volumes"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.635885 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91a23973-2c0e-4ef7-b1ca-9290b7c42dc8" path="/var/lib/kubelet/pods/91a23973-2c0e-4ef7-b1ca-9290b7c42dc8/volumes"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.636652 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.636955 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.636988 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.637041 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-api"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637049 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-api"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.637060 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637066 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.637078 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637084 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637328 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637357 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-metadata"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637382 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7747dca1-c42d-4119-a1a4-1acb40c8dd41" containerName="nova-metadata-log"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.637393 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" containerName="nova-api-api"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.639156 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.642018 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.642033 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.665353 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.666077 4840 scope.go:117] "RemoveContainer" containerID="f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.685161 4840 scope.go:117] "RemoveContainer" containerID="18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.685692 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8\": container with ID starting with 18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8 not found: ID does not exist" containerID="18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.685722 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8"} err="failed to get container status \"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8\": rpc error: code = NotFound desc = could not find container \"18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8\": container with ID starting with 18d32ef3680808bcc2af4610dbf8355013458f0193c5bd2372c7298346373db8 not found: ID does not exist"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.685743 4840 scope.go:117] "RemoveContainer" containerID="f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"
Dec 09 17:21:04 crc kubenswrapper[4840]: E1209 17:21:04.685997 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6\": container with ID starting with f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6 not found: ID does not exist" containerID="f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.686019 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6"} err="failed to get container status \"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6\": rpc error: code = NotFound desc = could not find container \"f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6\": container with ID starting with f76c621ead5212887c4cb6e86430887389620853e72e7811148394523fc831a6 not found: ID does not exist"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.776839 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.777148 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/543610b3-fd02-47ce-9bce-a112763de7bd-logs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.777237 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcb6b\" (UniqueName: \"kubernetes.io/projected/543610b3-fd02-47ce-9bce-a112763de7bd-kube-api-access-kcb6b\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.777414 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.777512 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-config-data\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.858207 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 17:21:04 crc kubenswrapper[4840]: W1209 17:21:04.860942 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8d64fd3_fec5_4d6c_b007_d481268bfe1b.slice/crio-971c49ec3faa617badbffa8a0af2de7f5f5a104951ec66146ff927ddb83db02d WatchSource:0}: Error finding container 971c49ec3faa617badbffa8a0af2de7f5f5a104951ec66146ff927ddb83db02d: Status 404 returned error can't find the container with id 971c49ec3faa617badbffa8a0af2de7f5f5a104951ec66146ff927ddb83db02d
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.879936 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/543610b3-fd02-47ce-9bce-a112763de7bd-logs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.880309 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcb6b\" (UniqueName: \"kubernetes.io/projected/543610b3-fd02-47ce-9bce-a112763de7bd-kube-api-access-kcb6b\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.880390 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.880440 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-config-data\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.880524 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/543610b3-fd02-47ce-9bce-a112763de7bd-logs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.882252 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.887410 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.887554 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.887558 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543610b3-fd02-47ce-9bce-a112763de7bd-config-data\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.913684 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcb6b\" (UniqueName: \"kubernetes.io/projected/543610b3-fd02-47ce-9bce-a112763de7bd-kube-api-access-kcb6b\") pod \"nova-metadata-0\" (UID: \"543610b3-fd02-47ce-9bce-a112763de7bd\") " pod="openstack/nova-metadata-0"
Dec 09 17:21:04 crc kubenswrapper[4840]: I1209 17:21:04.968713 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.079171 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.092416 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.107414 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.109985 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.112743 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.113049 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.113176 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.123492 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.192709 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.192777 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.192805 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-config-data\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.193072 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c12bf515-0e18-4026-ba24-0c88f099847e-logs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.193131 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.193274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbf7d\" (UniqueName: \"kubernetes.io/projected/c12bf515-0e18-4026-ba24-0c88f099847e-kube-api-access-cbf7d\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295187 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c12bf515-0e18-4026-ba24-0c88f099847e-logs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295494 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295527 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbf7d\" (UniqueName: \"kubernetes.io/projected/c12bf515-0e18-4026-ba24-0c88f099847e-kube-api-access-cbf7d\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295628 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295668 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.295689 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-config-data\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.302293 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c12bf515-0e18-4026-ba24-0c88f099847e-logs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.304714 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-config-data\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.305179 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.308340 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.308697 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12bf515-0e18-4026-ba24-0c88f099847e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.313629 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbf7d\" (UniqueName: \"kubernetes.io/projected/c12bf515-0e18-4026-ba24-0c88f099847e-kube-api-access-cbf7d\") pod \"nova-api-0\" (UID: \"c12bf515-0e18-4026-ba24-0c88f099847e\") " pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.447293 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.462143 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 17:21:05 crc kubenswrapper[4840]: W1209 17:21:05.470948 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod543610b3_fd02_47ce_9bce_a112763de7bd.slice/crio-ce6c7df5d64fc9a49ee0c78c05aab543eeca55c22cd4288d73a95aa648793ed1 WatchSource:0}: Error finding container ce6c7df5d64fc9a49ee0c78c05aab543eeca55c22cd4288d73a95aa648793ed1: Status 404 returned error can't find the container with id ce6c7df5d64fc9a49ee0c78c05aab543eeca55c22cd4288d73a95aa648793ed1
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.586914 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8d64fd3-fec5-4d6c-b007-d481268bfe1b","Type":"ContainerStarted","Data":"92164b3d3062b350df4ee3dc3a2de0db2d189d415eab2c44a57587576f0c7e66"}
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.587117 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e8d64fd3-fec5-4d6c-b007-d481268bfe1b","Type":"ContainerStarted","Data":"971c49ec3faa617badbffa8a0af2de7f5f5a104951ec66146ff927ddb83db02d"}
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.619104 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.619084769 podStartE2EDuration="2.619084769s" podCreationTimestamp="2025-12-09 17:21:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:21:05.61099292 +0000 UTC m=+1451.602103553" watchObservedRunningTime="2025-12-09 17:21:05.619084769 +0000 UTC m=+1451.610195402"
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.619896 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"543610b3-fd02-47ce-9bce-a112763de7bd","Type":"ContainerStarted","Data":"ce6c7df5d64fc9a49ee0c78c05aab543eeca55c22cd4288d73a95aa648793ed1"}
Dec 09 17:21:05 crc kubenswrapper[4840]: W1209 17:21:05.988102 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc12bf515_0e18_4026_ba24_0c88f099847e.slice/crio-032330fff42c71f240d798d30caa6f40f51980910dd6bf01c8aeab86ec3641df WatchSource:0}: Error finding container 032330fff42c71f240d798d30caa6f40f51980910dd6bf01c8aeab86ec3641df: Status 404 returned error can't find the container with id 032330fff42c71f240d798d30caa6f40f51980910dd6bf01c8aeab86ec3641df
Dec 09 17:21:05 crc kubenswrapper[4840]: I1209 17:21:05.996484 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.619449 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf26580f-1c4f-43a0-88a4-906080ccec88" path="/var/lib/kubelet/pods/bf26580f-1c4f-43a0-88a4-906080ccec88/volumes"
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.632541 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c12bf515-0e18-4026-ba24-0c88f099847e","Type":"ContainerStarted","Data":"4e02540c558d4e69cdf0a5956f927c6ba0f877b07283928349491f16aa057d82"}
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.632932 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c12bf515-0e18-4026-ba24-0c88f099847e","Type":"ContainerStarted","Data":"c8e23992d7cae310eac28dda1c4ebe0693b8f57bbcb02e8e044e4e154cf7f321"}
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.632949 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c12bf515-0e18-4026-ba24-0c88f099847e","Type":"ContainerStarted","Data":"032330fff42c71f240d798d30caa6f40f51980910dd6bf01c8aeab86ec3641df"}
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.634724 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"543610b3-fd02-47ce-9bce-a112763de7bd","Type":"ContainerStarted","Data":"41e33e7b4cf44a36efc5a316b8d6bff1a3304916813d176f74bfb0336be08581"}
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.635129 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"543610b3-fd02-47ce-9bce-a112763de7bd","Type":"ContainerStarted","Data":"ea5b95be92ef6e7cc0b991a959dff565ffef456bad50000bfd620e9912559657"}
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.670505 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.670487374 podStartE2EDuration="1.670487374s" podCreationTimestamp="2025-12-09 17:21:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:21:06.651357661 +0000 UTC m=+1452.642468294" watchObservedRunningTime="2025-12-09 17:21:06.670487374 +0000 UTC m=+1452.661598007"
Dec 09 17:21:06 crc kubenswrapper[4840]: I1209 17:21:06.672383 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.672376408 podStartE2EDuration="2.672376408s" podCreationTimestamp="2025-12-09 17:21:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:21:06.668318513 +0000 UTC m=+1452.659429146" watchObservedRunningTime="2025-12-09 17:21:06.672376408 +0000 UTC m=+1452.663487041"
Dec 09 17:21:09 crc kubenswrapper[4840]: I1209 17:21:09.330621 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Dec 09 17:21:09 crc kubenswrapper[4840]: I1209 17:21:09.969066 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 09 17:21:09 crc kubenswrapper[4840]: I1209 17:21:09.969122 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 09 17:21:14 crc kubenswrapper[4840]: I1209 17:21:14.329975 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 09 17:21:14 crc kubenswrapper[4840]: I1209 17:21:14.371320 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 09 17:21:14 crc kubenswrapper[4840]: I1209 17:21:14.790665 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 09 17:21:14 crc kubenswrapper[4840]: I1209 17:21:14.969688 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 09 17:21:14 crc kubenswrapper[4840]: I1209 17:21:14.970213 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 09 17:21:15 crc kubenswrapper[4840]: I1209 17:21:15.449099 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 09 17:21:15 crc kubenswrapper[4840]: I1209 17:21:15.449166 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 09 17:21:15 crc kubenswrapper[4840]: I1209 17:21:15.988150 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="543610b3-fd02-47ce-9bce-a112763de7bd" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:21:15 crc kubenswrapper[4840]: I1209 17:21:15.988258 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="543610b3-fd02-47ce-9bce-a112763de7bd" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:21:16 crc kubenswrapper[4840]: I1209 17:21:16.461128 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c12bf515-0e18-4026-ba24-0c88f099847e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:21:16 crc kubenswrapper[4840]: I1209 17:21:16.461225 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c12bf515-0e18-4026-ba24-0c88f099847e" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 09 17:21:16 crc kubenswrapper[4840]: I1209 17:21:16.654959 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Dec 09 17:21:24 crc kubenswrapper[4840]: I1209 17:21:24.977767 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Dec 09 17:21:24 crc kubenswrapper[4840]: I1209 17:21:24.978576 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Dec 09 17:21:24 crc kubenswrapper[4840]: I1209 17:21:24.985038 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Dec 09 17:21:24 crc kubenswrapper[4840]: I1209 17:21:24.985549 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.456432 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.457423 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.457733 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.471064 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.874637 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 09 17:21:25 crc kubenswrapper[4840]: I1209 17:21:25.880815 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 09 17:21:34 crc kubenswrapper[4840]: I1209 17:21:34.035870 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:21:34 crc kubenswrapper[4840]: I1209 17:21:34.036392 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:21:36 crc kubenswrapper[4840]: I1209 17:21:36.896347 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-db-sync-f6mr5"]
Dec 09 17:21:36 crc kubenswrapper[4840]: I1209 17:21:36.909677 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-db-sync-f6mr5"]
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.011284 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-sync-trhsb"]
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.012773 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.019149 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.064868 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-trhsb"]
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.164839 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-certs\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.164903 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtfrv\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-kube-api-access-wtfrv\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.165059 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-combined-ca-bundle\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.166038 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-scripts\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.166298 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-config-data\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.280690 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-scripts\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.280806 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-config-data\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.280842 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-certs\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.280865 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtfrv\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-kube-api-access-wtfrv\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.280920 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-combined-ca-bundle\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.287025 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-combined-ca-bundle\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.289772 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-config-data\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.290261 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5124c5e9-268a-473a-abe6-b5d1af073124-scripts\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.291311 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-certs\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.313860 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtfrv\" (UniqueName: \"kubernetes.io/projected/5124c5e9-268a-473a-abe6-b5d1af073124-kube-api-access-wtfrv\") pod \"cloudkitty-db-sync-trhsb\" (UID: \"5124c5e9-268a-473a-abe6-b5d1af073124\") " pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.340842 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-trhsb"
Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.811947 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-trhsb"]
Dec 09 17:21:37 crc kubenswrapper[4840]: E1209 17:21:37.935445 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:21:37 crc kubenswrapper[4840]: E1209 17:21:37.935587 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:21:37 crc kubenswrapper[4840]: E1209 17:21:37.935782 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:21:37 crc kubenswrapper[4840]: E1209 17:21:37.937580 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired.
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:21:37 crc kubenswrapper[4840]: I1209 17:21:37.999368 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-trhsb" event={"ID":"5124c5e9-268a-473a-abe6-b5d1af073124","Type":"ContainerStarted","Data":"2a96325eab2cc03a27730a06bcaf815e7d6b1c7d953448a0cdb95513cdb4f577"} Dec 09 17:21:38 crc kubenswrapper[4840]: E1209 17:21:38.001278 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.594672 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.595416 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-central-agent" containerID="cri-o://13ccc1f3645c9639e4ba8a87dd921b4fe2eed50843d8ca23abc6a18e97b38856" gracePeriod=30 Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.595490 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="sg-core" containerID="cri-o://0c034f4301f6bfd4f1991026239acbc2d40aa12505a6b947e1bbefa7045b6bba" gracePeriod=30 Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.595475 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="proxy-httpd" containerID="cri-o://ab53ad577133a2b2cdbb2393c02c3b76ace8404bcb1bbeb48dd37254b8821dbe" gracePeriod=30 Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.595490 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-notification-agent" containerID="cri-o://b57a4e71deff11db39b0030e0c0a19346b3e47b90439f31d13b7978def190e22" gracePeriod=30 Dec 09 17:21:38 crc kubenswrapper[4840]: I1209 17:21:38.633869 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6c5b609-3028-4ff9-9bf7-88fa13784f6a" path="/var/lib/kubelet/pods/c6c5b609-3028-4ff9-9bf7-88fa13784f6a/volumes" Dec 09 17:21:39 crc kubenswrapper[4840]: I1209 17:21:39.010977 4840 generic.go:334] "Generic (PLEG): container finished" podID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerID="0c034f4301f6bfd4f1991026239acbc2d40aa12505a6b947e1bbefa7045b6bba" exitCode=2 Dec 09 17:21:39 crc kubenswrapper[4840]: I1209 17:21:39.011016 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerDied","Data":"0c034f4301f6bfd4f1991026239acbc2d40aa12505a6b947e1bbefa7045b6bba"} Dec 09 17:21:39 crc kubenswrapper[4840]: E1209 17:21:39.012458 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:21:39 crc kubenswrapper[4840]: I1209 17:21:39.197745 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:40 crc kubenswrapper[4840]: I1209 17:21:40.023014 4840 generic.go:334] "Generic (PLEG): container finished" podID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerID="ab53ad577133a2b2cdbb2393c02c3b76ace8404bcb1bbeb48dd37254b8821dbe" exitCode=0 Dec 09 17:21:40 crc kubenswrapper[4840]: I1209 17:21:40.023863 4840 generic.go:334] "Generic (PLEG): container finished" podID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerID="13ccc1f3645c9639e4ba8a87dd921b4fe2eed50843d8ca23abc6a18e97b38856" exitCode=0 Dec 09 17:21:40 crc kubenswrapper[4840]: I1209 17:21:40.023944 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerDied","Data":"ab53ad577133a2b2cdbb2393c02c3b76ace8404bcb1bbeb48dd37254b8821dbe"} Dec 09 17:21:40 crc kubenswrapper[4840]: I1209 17:21:40.024068 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerDied","Data":"13ccc1f3645c9639e4ba8a87dd921b4fe2eed50843d8ca23abc6a18e97b38856"} Dec 09 17:21:40 crc kubenswrapper[4840]: I1209 17:21:40.178836 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.058211 4840 generic.go:334] "Generic (PLEG): container finished" podID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerID="b57a4e71deff11db39b0030e0c0a19346b3e47b90439f31d13b7978def190e22" exitCode=0 Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.058676 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerDied","Data":"b57a4e71deff11db39b0030e0c0a19346b3e47b90439f31d13b7978def190e22"} Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.211671 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414483 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414546 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414599 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414632 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414734 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-962n2\" (UniqueName: \"kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414820 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414840 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414920 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data\") pod \"1353e5b0-b11a-47c1-830d-10a9ec998209\" (UID: \"1353e5b0-b11a-47c1-830d-10a9ec998209\") " Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.414983 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.415374 4840 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.415576 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.440120 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2" (OuterVolumeSpecName: "kube-api-access-962n2") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "kube-api-access-962n2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.447004 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts" (OuterVolumeSpecName: "scripts") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.462942 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.498027 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.516986 4840 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.517015 4840 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.517026 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-962n2\" (UniqueName: \"kubernetes.io/projected/1353e5b0-b11a-47c1-830d-10a9ec998209-kube-api-access-962n2\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.517035 4840 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1353e5b0-b11a-47c1-830d-10a9ec998209-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.517044 4840 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.547139 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="rabbitmq" containerID="cri-o://186cb344f06d15427e74197e22e6db35af1f6bca882d7001d746a9cfabd7d7d0" gracePeriod=604796 Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.557501 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.567800 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data" (OuterVolumeSpecName: "config-data") pod "1353e5b0-b11a-47c1-830d-10a9ec998209" (UID: "1353e5b0-b11a-47c1-830d-10a9ec998209"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.618481 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:43 crc kubenswrapper[4840]: I1209 17:21:43.618521 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1353e5b0-b11a-47c1-830d-10a9ec998209-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.071157 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1353e5b0-b11a-47c1-830d-10a9ec998209","Type":"ContainerDied","Data":"29fcf9174b02fa0ffe803a052c98071735569ee2018137928f64c8b80e88c9a2"} Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.071449 4840 scope.go:117] "RemoveContainer" containerID="ab53ad577133a2b2cdbb2393c02c3b76ace8404bcb1bbeb48dd37254b8821dbe" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.071574 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.105360 4840 scope.go:117] "RemoveContainer" containerID="0c034f4301f6bfd4f1991026239acbc2d40aa12505a6b947e1bbefa7045b6bba" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.126603 4840 scope.go:117] "RemoveContainer" containerID="b57a4e71deff11db39b0030e0c0a19346b3e47b90439f31d13b7978def190e22" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.498883 4840 scope.go:117] "RemoveContainer" containerID="13ccc1f3645c9639e4ba8a87dd921b4fe2eed50843d8ca23abc6a18e97b38856" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.566246 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.587119 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.686839 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" path="/var/lib/kubelet/pods/1353e5b0-b11a-47c1-830d-10a9ec998209/volumes" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.687909 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:44 crc kubenswrapper[4840]: E1209 17:21:44.690248 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="sg-core" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.690276 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="sg-core" Dec 09 17:21:44 crc kubenswrapper[4840]: E1209 17:21:44.690315 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="proxy-httpd" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.690324 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="proxy-httpd" Dec 09 17:21:44 crc kubenswrapper[4840]: E1209 17:21:44.690371 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-notification-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.690379 4840 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-notification-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: E1209 17:21:44.690411 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-central-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.690420 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-central-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.693788 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-central-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.693850 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="ceilometer-notification-agent" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.693896 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="sg-core" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.693914 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1353e5b0-b11a-47c1-830d-10a9ec998209" containerName="proxy-httpd" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.703343 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.703500 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.720530 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.722463 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.723419 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824168 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-run-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824258 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg746\" (UniqueName: \"kubernetes.io/projected/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-kube-api-access-kg746\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824289 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824308 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-config-data\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824336 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-scripts\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824362 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824418 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.824457 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-log-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.926770 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-run-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.926878 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg746\" (UniqueName: \"kubernetes.io/projected/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-kube-api-access-kg746\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.926921 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.926948 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-config-data\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.927006 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-scripts\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.927047 4840 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.927136 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.927177 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-log-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.927718 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-log-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.928000 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-run-httpd\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.933529 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-config-data\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.934032 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.934541 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.946984 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.948299 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-scripts\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:44 crc kubenswrapper[4840]: I1209 17:21:44.953636 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg746\" 
(UniqueName: \"kubernetes.io/projected/9ec426c3-8fdd-42d9-9ea5-5d751112ee04-kube-api-access-kg746\") pod \"ceilometer-0\" (UID: \"9ec426c3-8fdd-42d9-9ea5-5d751112ee04\") " pod="openstack/ceilometer-0" Dec 09 17:21:45 crc kubenswrapper[4840]: I1209 17:21:45.066363 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 17:21:45 crc kubenswrapper[4840]: I1209 17:21:45.646106 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 17:21:45 crc kubenswrapper[4840]: E1209 17:21:45.753412 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:21:45 crc kubenswrapper[4840]: E1209 17:21:45.753474 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:21:45 crc kubenswrapper[4840]: E1209 17:21:45.753595 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:21:45 crc kubenswrapper[4840]: I1209 17:21:45.763691 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="rabbitmq" containerID="cri-o://375bef9ed5aadebfc6614b42d1a27270b8543c203da38ae787c0c3b2315b2a78" gracePeriod=604795 Dec 09 17:21:46 crc kubenswrapper[4840]: I1209 17:21:46.108129 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ec426c3-8fdd-42d9-9ea5-5d751112ee04","Type":"ContainerStarted","Data":"800fee991c48ae299369e7b8bbf7125cb23bc59fb93ecd1b223d3df00eaf17ff"} Dec 09 17:21:47 crc kubenswrapper[4840]: I1209 17:21:47.120860 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ec426c3-8fdd-42d9-9ea5-5d751112ee04","Type":"ContainerStarted","Data":"0933654e2ec24fe9cfb0ed666490940e5b79de0ec05c2a19877a94937ddf78be"} Dec 09 17:21:47 crc kubenswrapper[4840]: I1209 17:21:47.856081 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused" Dec 09 17:21:48 crc kubenswrapper[4840]: I1209 17:21:48.131476 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ec426c3-8fdd-42d9-9ea5-5d751112ee04","Type":"ContainerStarted","Data":"6888bc7f473b0332ea8a9d0683a4e0cadbc1205a1038207f19321998a9d89ee9"} Dec 09 17:21:48 crc kubenswrapper[4840]: I1209 17:21:48.156899 4840 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.108:5671: connect: connection refused" Dec 09 17:21:49 crc kubenswrapper[4840]: E1209 17:21:49.091901 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:21:49 crc kubenswrapper[4840]: I1209 17:21:49.146715 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ec426c3-8fdd-42d9-9ea5-5d751112ee04","Type":"ContainerStarted","Data":"2afc5af41da7bfa49dc1e442a8cfe8679f6b03cec5527354f639f87ecd2573f2"} Dec 09 17:21:49 crc kubenswrapper[4840]: I1209 17:21:49.146912 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 17:21:49 crc kubenswrapper[4840]: E1209 17:21:49.149567 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.158697 4840 generic.go:334] "Generic (PLEG): container finished" podID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerID="186cb344f06d15427e74197e22e6db35af1f6bca882d7001d746a9cfabd7d7d0" exitCode=0 Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.158772 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerDied","Data":"186cb344f06d15427e74197e22e6db35af1f6bca882d7001d746a9cfabd7d7d0"} Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.159162 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9b2bc342-2987-4fc2-b078-bc5aa00c063d","Type":"ContainerDied","Data":"bd221d929f41d4077c0bace904da3e6daef22b54abc96d83269a2b79799ae9ca"} Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.159184 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd221d929f41d4077c0bace904da3e6daef22b54abc96d83269a2b79799ae9ca" Dec 09 17:21:50 crc kubenswrapper[4840]: E1209 17:21:50.161113 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.246906 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344475 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344542 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344570 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344600 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344617 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344636 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344662 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb8w2\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.344680 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.379262 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.379376 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd\") pod 
\"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.379421 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins\") pod \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\" (UID: \"9b2bc342-2987-4fc2-b078-bc5aa00c063d\") " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.379787 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.380436 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.381346 4840 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.381364 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.381832 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.383730 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info" (OuterVolumeSpecName: "pod-info") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.386275 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.390771 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2" (OuterVolumeSpecName: "kube-api-access-jb8w2") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "kube-api-access-jb8w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.391090 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.397863 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data" (OuterVolumeSpecName: "config-data") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.429835 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46" (OuterVolumeSpecName: "persistence") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.447428 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf" (OuterVolumeSpecName: "server-conf") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486055 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486090 4840 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b2bc342-2987-4fc2-b078-bc5aa00c063d-pod-info\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486101 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486112 4840 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b2bc342-2987-4fc2-b078-bc5aa00c063d-server-conf\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486122 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb8w2\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-kube-api-access-jb8w2\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486132 4840 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b2bc342-2987-4fc2-b078-bc5aa00c063d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486157 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") on node \"crc\" " Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.486170 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.548432 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.548604 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46") on node "crc" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.556123 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9b2bc342-2987-4fc2-b078-bc5aa00c063d" (UID: "9b2bc342-2987-4fc2-b078-bc5aa00c063d"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.588335 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:50 crc kubenswrapper[4840]: I1209 17:21:50.588373 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b2bc342-2987-4fc2-b078-bc5aa00c063d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.173252 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.220823 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.236437 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.253037 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:51 crc kubenswrapper[4840]: E1209 17:21:51.253610 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="setup-container" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.253635 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="setup-container" Dec 09 17:21:51 crc kubenswrapper[4840]: E1209 17:21:51.253681 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="rabbitmq" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.253690 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="rabbitmq" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.253958 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" containerName="rabbitmq" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.260080 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.265884 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.266199 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.266462 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.266658 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.267353 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.267829 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.268456 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.269146 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gdrvc" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.413695 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.413762 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6xft\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-kube-api-access-f6xft\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.413821 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.413881 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.413930 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414083 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414101 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414127 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414192 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-config-data\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414229 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.414249 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.515756 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.515935 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-config-data\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516009 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516069 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" 
(UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516148 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516198 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6xft\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-kube-api-access-f6xft\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516278 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516389 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516525 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.516556 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.517136 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.517134 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.517181 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.517677 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-config-data\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.518013 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.521557 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.521602 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.522643 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.523391 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.523427 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b98c978526173bf0076aa510cb6293cae39a106ae593fc3bcfda304b0021f1ac/globalmount\"" pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.524883 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.535939 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6xft\" (UniqueName: \"kubernetes.io/projected/0f054c36-b41d-4ef8-8d86-1a9ef134dba0-kube-api-access-f6xft\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.591428 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0eebc678-8301-4ad5-950c-ea0d5eac0d46\") pod \"rabbitmq-server-0\" (UID: \"0f054c36-b41d-4ef8-8d86-1a9ef134dba0\") " pod="openstack/rabbitmq-server-0" Dec 09 17:21:51 crc kubenswrapper[4840]: I1209 17:21:51.887008 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.194832 4840 generic.go:334] "Generic (PLEG): container finished" podID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerID="375bef9ed5aadebfc6614b42d1a27270b8543c203da38ae787c0c3b2315b2a78" exitCode=0 Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.194956 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerDied","Data":"375bef9ed5aadebfc6614b42d1a27270b8543c203da38ae787c0c3b2315b2a78"} Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.378586 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.625195 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b2bc342-2987-4fc2-b078-bc5aa00c063d" path="/var/lib/kubelet/pods/9b2bc342-2987-4fc2-b078-bc5aa00c063d/volumes" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.650077 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:52 crc kubenswrapper[4840]: E1209 17:21:52.701674 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:21:52 crc kubenswrapper[4840]: E1209 17:21:52.701720 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:21:52 crc kubenswrapper[4840]: E1209 17:21:52.701834 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 17:21:52 crc kubenswrapper[4840]: E1209 17:21:52.703074 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847460 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847547 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847597 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847711 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847752 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.847861 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848131 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848315 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848765 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848843 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848901 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.848985 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5cgm\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm\") pod \"7170c3b2-9d93-4736-8ade-66423bc4a081\" (UID: \"7170c3b2-9d93-4736-8ade-66423bc4a081\") " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.849853 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.850192 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.850226 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.850362 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.853312 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info" (OuterVolumeSpecName: "pod-info") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.866101 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.875063 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.876237 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm" (OuterVolumeSpecName: "kube-api-access-k5cgm") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "kube-api-access-k5cgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.889392 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851" (OuterVolumeSpecName: "persistence") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "pvc-63110570-9ea4-478a-8f0b-fab44f0a4851". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.896475 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data" (OuterVolumeSpecName: "config-data") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.911801 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf" (OuterVolumeSpecName: "server-conf") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953078 4840 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953111 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5cgm\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-kube-api-access-k5cgm\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953143 4840 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") on node \"crc\" " Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953153 4840 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-server-conf\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953163 4840 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7170c3b2-9d93-4736-8ade-66423bc4a081-pod-info\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953171 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953180 4840 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7170c3b2-9d93-4736-8ade-66423bc4a081-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.953188 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7170c3b2-9d93-4736-8ade-66423bc4a081-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.978119 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7170c3b2-9d93-4736-8ade-66423bc4a081" (UID: "7170c3b2-9d93-4736-8ade-66423bc4a081"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.987010 4840 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 09 17:21:52 crc kubenswrapper[4840]: I1209 17:21:52.987574 4840 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-63110570-9ea4-478a-8f0b-fab44f0a4851" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851") on node "crc" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.054638 4840 reconciler_common.go:293] "Volume detached for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.054678 4840 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7170c3b2-9d93-4736-8ade-66423bc4a081-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.219549 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0f054c36-b41d-4ef8-8d86-1a9ef134dba0","Type":"ContainerStarted","Data":"745dc7f06669c3f616564921869d84d77bf9107e317f1584796f9ce802ec94ed"} Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.229489 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7170c3b2-9d93-4736-8ade-66423bc4a081","Type":"ContainerDied","Data":"feaad1bcb61b2536e09deb3da000bf3106c627d71ae7508b017a0490aea616c4"} Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.229569 4840 scope.go:117] "RemoveContainer" containerID="375bef9ed5aadebfc6614b42d1a27270b8543c203da38ae787c0c3b2315b2a78" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.229780 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.276000 4840 scope.go:117] "RemoveContainer" containerID="2365d04c83aa2f9dd63e9cee92a15b62873fc69fa0193a589f63c05ec42bf785" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.315827 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.326459 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.346042 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:53 crc kubenswrapper[4840]: E1209 17:21:53.346906 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="setup-container" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.346931 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="setup-container" Dec 09 17:21:53 crc kubenswrapper[4840]: E1209 17:21:53.346977 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="rabbitmq" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.346989 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="rabbitmq" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.347370 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" containerName="rabbitmq" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.348804 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.357442 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.357693 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.357896 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.358083 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-k4twf" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.358202 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.358776 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.359160 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.364365 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.464671 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.464741 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.464769 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/745eab59-21fe-492c-8a51-5f557f1802e3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.464796 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465097 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465162 4840 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w5j4\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-kube-api-access-4w5j4\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465234 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465270 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465330 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465347 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.465371 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/745eab59-21fe-492c-8a51-5f557f1802e3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567369 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567499 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567534 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/745eab59-21fe-492c-8a51-5f557f1802e3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 
17:21:53.567564 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567702 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567739 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w5j4\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-kube-api-access-4w5j4\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567785 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567818 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567866 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.567902 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.569327 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/745eab59-21fe-492c-8a51-5f557f1802e3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.568580 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.568912 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.568484 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.569270 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.569177 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/745eab59-21fe-492c-8a51-5f557f1802e3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.571758 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/745eab59-21fe-492c-8a51-5f557f1802e3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.572355 4840 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.572384 4840 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/52d5c339ee81f996f1e83b0d60c39ddb7d281130efbf18960868a994e4807c4a/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.573261 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.575287 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.575985 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/745eab59-21fe-492c-8a51-5f557f1802e3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.587103 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w5j4\" (UniqueName: \"kubernetes.io/projected/745eab59-21fe-492c-8a51-5f557f1802e3-kube-api-access-4w5j4\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.632486 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-63110570-9ea4-478a-8f0b-fab44f0a4851\") pod \"rabbitmq-cell1-server-0\" (UID: \"745eab59-21fe-492c-8a51-5f557f1802e3\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.712518 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.976033 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.978501 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:53 crc kubenswrapper[4840]: I1209 17:21:53.981938 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.000556 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083556 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083609 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083646 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083710 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxw95\" (UniqueName: \"kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083777 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083814 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.083853 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.175195 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 17:21:54 crc kubenswrapper[4840]: W1209 17:21:54.184209 4840 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod745eab59_21fe_492c_8a51_5f557f1802e3.slice/crio-aa27ac6bd2df5b92c27a0f0c9a247e14ac3ceda65530eced704c2d5dc8b3996d WatchSource:0}: Error finding container aa27ac6bd2df5b92c27a0f0c9a247e14ac3ceda65530eced704c2d5dc8b3996d: Status 404 returned error can't find the container with id aa27ac6bd2df5b92c27a0f0c9a247e14ac3ceda65530eced704c2d5dc8b3996d Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185392 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185469 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185528 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185602 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185636 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185678 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.185763 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxw95\" (UniqueName: \"kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.186704 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc 
kubenswrapper[4840]: I1209 17:21:54.186824 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.187138 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.187825 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.189250 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.189442 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.207628 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxw95\" (UniqueName: \"kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95\") pod \"dnsmasq-dns-595979776c-shsxs\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.248641 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"745eab59-21fe-492c-8a51-5f557f1802e3","Type":"ContainerStarted","Data":"aa27ac6bd2df5b92c27a0f0c9a247e14ac3ceda65530eced704c2d5dc8b3996d"} Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.307351 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.631827 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7170c3b2-9d93-4736-8ade-66423bc4a081" path="/var/lib/kubelet/pods/7170c3b2-9d93-4736-8ade-66423bc4a081/volumes" Dec 09 17:21:54 crc kubenswrapper[4840]: I1209 17:21:54.842780 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:21:54 crc kubenswrapper[4840]: W1209 17:21:54.845582 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddda2a3eb_e28d_4c69_91a8_195dbce4bc85.slice/crio-54787c90ff22c6c3dcc072093be649cb7d4bab435d53fd7e45bad281c0a9b5b3 WatchSource:0}: Error finding container 54787c90ff22c6c3dcc072093be649cb7d4bab435d53fd7e45bad281c0a9b5b3: Status 404 returned error can't find the container with id 54787c90ff22c6c3dcc072093be649cb7d4bab435d53fd7e45bad281c0a9b5b3 Dec 09 17:21:55 crc kubenswrapper[4840]: I1209 17:21:55.262428 4840 generic.go:334] "Generic (PLEG): container finished" podID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerID="579b4084887aff8894c85c30ab717509e2c24e2ddab2eb649d384acfcf8e4473" exitCode=0 Dec 09 17:21:55 crc kubenswrapper[4840]: I1209 17:21:55.262519 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-shsxs" event={"ID":"dda2a3eb-e28d-4c69-91a8-195dbce4bc85","Type":"ContainerDied","Data":"579b4084887aff8894c85c30ab717509e2c24e2ddab2eb649d384acfcf8e4473"} Dec 09 17:21:55 crc kubenswrapper[4840]: I1209 17:21:55.262548 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-shsxs" event={"ID":"dda2a3eb-e28d-4c69-91a8-195dbce4bc85","Type":"ContainerStarted","Data":"54787c90ff22c6c3dcc072093be649cb7d4bab435d53fd7e45bad281c0a9b5b3"} Dec 09 17:21:55 crc kubenswrapper[4840]: I1209 17:21:55.264955 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0f054c36-b41d-4ef8-8d86-1a9ef134dba0","Type":"ContainerStarted","Data":"e7293a17954a08d62291ccd100f45a4b56c2fa3a17c799f78da4debe1990d51e"} Dec 09 17:21:56 crc kubenswrapper[4840]: I1209 17:21:56.277475 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-shsxs" event={"ID":"dda2a3eb-e28d-4c69-91a8-195dbce4bc85","Type":"ContainerStarted","Data":"035c31487ba71fe56333099b3aec3031044ec542aecb2c4e3bf22855800be4dc"} Dec 09 17:21:56 crc kubenswrapper[4840]: I1209 17:21:56.278050 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:21:56 crc kubenswrapper[4840]: I1209 17:21:56.279378 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"745eab59-21fe-492c-8a51-5f557f1802e3","Type":"ContainerStarted","Data":"9a739f5d6a2f7b90a45cf3640d3e441aa5e9fd5ef6e2d6703beca81147e5e219"} Dec 09 17:21:56 crc kubenswrapper[4840]: I1209 17:21:56.301487 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-595979776c-shsxs" podStartSLOduration=3.301466976 podStartE2EDuration="3.301466976s" podCreationTimestamp="2025-12-09 17:21:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:21:56.299177851 +0000 UTC m=+1502.290288504" watchObservedRunningTime="2025-12-09 17:21:56.301466976 
+0000 UTC m=+1502.292577619" Dec 09 17:22:02 crc kubenswrapper[4840]: I1209 17:22:02.628783 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 09 17:22:02 crc kubenswrapper[4840]: E1209 17:22:02.738349 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:22:02 crc kubenswrapper[4840]: E1209 17:22:02.738410 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:22:02 crc kubenswrapper[4840]: E1209 17:22:02.738523 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:22:02 crc kubenswrapper[4840]: E1209 17:22:02.739718 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:22:03 crc kubenswrapper[4840]: E1209 17:22:03.383846 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.036699 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.037122 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.309599 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.437781 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.438088 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" 
podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="dnsmasq-dns" containerID="cri-o://e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221" gracePeriod=10 Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.564330 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-vtm9k"] Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.584231 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: E1209 17:22:04.637310 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.639719 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-vtm9k"] Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718254 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718561 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718612 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718629 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hcnn\" (UniqueName: \"kubernetes.io/projected/048f4974-cd24-4291-a209-a357603a64e8-kube-api-access-9hcnn\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718711 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-config\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.718768 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-svc\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 
crc kubenswrapper[4840]: I1209 17:22:04.718814 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821289 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-config\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821408 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-svc\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821466 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821538 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821582 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821632 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hcnn\" (UniqueName: \"kubernetes.io/projected/048f4974-cd24-4291-a209-a357603a64e8-kube-api-access-9hcnn\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.821653 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.822949 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: 
I1209 17:22:04.822986 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.823171 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.823194 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-dns-svc\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.823354 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-config\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.823416 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048f4974-cd24-4291-a209-a357603a64e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.841093 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hcnn\" (UniqueName: \"kubernetes.io/projected/048f4974-cd24-4291-a209-a357603a64e8-kube-api-access-9hcnn\") pod \"dnsmasq-dns-5475ccd585-vtm9k\" (UID: \"048f4974-cd24-4291-a209-a357603a64e8\") " pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:04 crc kubenswrapper[4840]: I1209 17:22:04.927040 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.073441 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132679 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st6ms\" (UniqueName: \"kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132766 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132859 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132901 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132938 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.132955 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb\") pod \"837fb506-c332-4b96-bb0a-95a008bc2016\" (UID: \"837fb506-c332-4b96-bb0a-95a008bc2016\") " Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.139730 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms" (OuterVolumeSpecName: "kube-api-access-st6ms") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "kube-api-access-st6ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.197859 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.211454 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.222853 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.225571 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config" (OuterVolumeSpecName: "config") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.230706 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "837fb506-c332-4b96-bb0a-95a008bc2016" (UID: "837fb506-c332-4b96-bb0a-95a008bc2016"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235413 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235447 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235457 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235467 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235476 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st6ms\" (UniqueName: \"kubernetes.io/projected/837fb506-c332-4b96-bb0a-95a008bc2016-kube-api-access-st6ms\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.235486 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/837fb506-c332-4b96-bb0a-95a008bc2016-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.385479 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-vtm9k"] Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.420532 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" event={"ID":"048f4974-cd24-4291-a209-a357603a64e8","Type":"ContainerStarted","Data":"e17912bd4d190e55cf0267e29db166d40d5c078487312ab817d0355511450359"} Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.423229 4840 generic.go:334] "Generic (PLEG): 
container finished" podID="837fb506-c332-4b96-bb0a-95a008bc2016" containerID="e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221" exitCode=0 Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.423293 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.423434 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" event={"ID":"837fb506-c332-4b96-bb0a-95a008bc2016","Type":"ContainerDied","Data":"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221"} Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.423531 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-nkm2f" event={"ID":"837fb506-c332-4b96-bb0a-95a008bc2016","Type":"ContainerDied","Data":"b30e12b79dda4a77e2432c7fee3747461e335d77a63a1979e06f88343e7ae437"} Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.423631 4840 scope.go:117] "RemoveContainer" containerID="e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.458451 4840 scope.go:117] "RemoveContainer" containerID="786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.468041 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.482755 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-nkm2f"] Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.498410 4840 scope.go:117] "RemoveContainer" containerID="e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221" Dec 09 17:22:05 crc kubenswrapper[4840]: E1209 17:22:05.503127 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221\": container with ID starting with e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221 not found: ID does not exist" containerID="e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.503289 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221"} err="failed to get container status \"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221\": rpc error: code = NotFound desc = could not find container \"e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221\": container with ID starting with e6367fd861c137a12ff481665aa62471cbbc4c20137adafa1883e5b95b5c5221 not found: ID does not exist" Dec 09 17:22:05 crc kubenswrapper[4840]: I1209 17:22:05.503406 4840 scope.go:117] "RemoveContainer" containerID="786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c" Dec 09 17:22:05 crc kubenswrapper[4840]: E1209 17:22:05.503953 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c\": container with ID starting with 786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c not found: ID does not exist" containerID="786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c" Dec 09 17:22:05 
crc kubenswrapper[4840]: I1209 17:22:05.504010 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c"} err="failed to get container status \"786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c\": rpc error: code = NotFound desc = could not find container \"786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c\": container with ID starting with 786134b279d5e6484d6908c5164c77a18429c9a92d3bb0b67fb52e3a39c8f34c not found: ID does not exist" Dec 09 17:22:06 crc kubenswrapper[4840]: I1209 17:22:06.434372 4840 generic.go:334] "Generic (PLEG): container finished" podID="048f4974-cd24-4291-a209-a357603a64e8" containerID="7615bbc52d4e92b5c1921cc4b14c496c9807f1daa750b0cd5b1d334e2dedc86f" exitCode=0 Dec 09 17:22:06 crc kubenswrapper[4840]: I1209 17:22:06.434475 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" event={"ID":"048f4974-cd24-4291-a209-a357603a64e8","Type":"ContainerDied","Data":"7615bbc52d4e92b5c1921cc4b14c496c9807f1daa750b0cd5b1d334e2dedc86f"} Dec 09 17:22:06 crc kubenswrapper[4840]: I1209 17:22:06.619852 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" path="/var/lib/kubelet/pods/837fb506-c332-4b96-bb0a-95a008bc2016/volumes" Dec 09 17:22:07 crc kubenswrapper[4840]: I1209 17:22:07.450138 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" event={"ID":"048f4974-cd24-4291-a209-a357603a64e8","Type":"ContainerStarted","Data":"3048da4e796c63b8cd0f2dea1652c5ff31645a5d11e5989fdc4ab859f598b8e0"} Dec 09 17:22:07 crc kubenswrapper[4840]: I1209 17:22:07.450678 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:07 crc kubenswrapper[4840]: I1209 17:22:07.486717 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" podStartSLOduration=3.48669346 podStartE2EDuration="3.48669346s" podCreationTimestamp="2025-12-09 17:22:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:22:07.474527985 +0000 UTC m=+1513.465638628" watchObservedRunningTime="2025-12-09 17:22:07.48669346 +0000 UTC m=+1513.477804103" Dec 09 17:22:14 crc kubenswrapper[4840]: I1209 17:22:14.929207 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5475ccd585-vtm9k" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.019172 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.019508 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-595979776c-shsxs" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="dnsmasq-dns" containerID="cri-o://035c31487ba71fe56333099b3aec3031044ec542aecb2c4e3bf22855800be4dc" gracePeriod=10 Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.550348 4840 generic.go:334] "Generic (PLEG): container finished" podID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerID="035c31487ba71fe56333099b3aec3031044ec542aecb2c4e3bf22855800be4dc" exitCode=0 Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.550422 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-595979776c-shsxs" event={"ID":"dda2a3eb-e28d-4c69-91a8-195dbce4bc85","Type":"ContainerDied","Data":"035c31487ba71fe56333099b3aec3031044ec542aecb2c4e3bf22855800be4dc"} Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.683494 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753023 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753130 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753202 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxw95\" (UniqueName: \"kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753243 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753261 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753367 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.753417 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config\") pod \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\" (UID: \"dda2a3eb-e28d-4c69-91a8-195dbce4bc85\") " Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.768196 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95" (OuterVolumeSpecName: "kube-api-access-zxw95") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "kube-api-access-zxw95". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.827887 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.827990 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.842624 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.848694 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.848806 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config" (OuterVolumeSpecName: "config") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857232 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxw95\" (UniqueName: \"kubernetes.io/projected/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-kube-api-access-zxw95\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857265 4840 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857275 4840 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857287 4840 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-config\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857317 4840 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.857327 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.869162 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dda2a3eb-e28d-4c69-91a8-195dbce4bc85" (UID: "dda2a3eb-e28d-4c69-91a8-195dbce4bc85"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:22:15 crc kubenswrapper[4840]: I1209 17:22:15.959666 4840 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dda2a3eb-e28d-4c69-91a8-195dbce4bc85-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.568648 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-shsxs" event={"ID":"dda2a3eb-e28d-4c69-91a8-195dbce4bc85","Type":"ContainerDied","Data":"54787c90ff22c6c3dcc072093be649cb7d4bab435d53fd7e45bad281c0a9b5b3"} Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.568998 4840 scope.go:117] "RemoveContainer" containerID="035c31487ba71fe56333099b3aec3031044ec542aecb2c4e3bf22855800be4dc" Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.568772 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-shsxs" Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.603015 4840 scope.go:117] "RemoveContainer" containerID="579b4084887aff8894c85c30ab717509e2c24e2ddab2eb649d384acfcf8e4473" Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.649592 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:22:16 crc kubenswrapper[4840]: I1209 17:22:16.669770 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-595979776c-shsxs"] Dec 09 17:22:18 crc kubenswrapper[4840]: E1209 17:22:18.612694 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:22:18 crc kubenswrapper[4840]: I1209 17:22:18.627327 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" path="/var/lib/kubelet/pods/dda2a3eb-e28d-4c69-91a8-195dbce4bc85/volumes" Dec 09 17:22:19 crc kubenswrapper[4840]: E1209 17:22:19.700826 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:22:19 crc kubenswrapper[4840]: E1209 17:22:19.701109 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:22:19 crc kubenswrapper[4840]: E1209 17:22:19.701235 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:22:19 crc kubenswrapper[4840]: E1209 17:22:19.702473 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:22:26 crc kubenswrapper[4840]: I1209 17:22:26.696702 4840 generic.go:334] "Generic (PLEG): container finished" podID="0f054c36-b41d-4ef8-8d86-1a9ef134dba0" containerID="e7293a17954a08d62291ccd100f45a4b56c2fa3a17c799f78da4debe1990d51e" exitCode=0
Dec 09 17:22:26 crc kubenswrapper[4840]: I1209 17:22:26.696800 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0f054c36-b41d-4ef8-8d86-1a9ef134dba0","Type":"ContainerDied","Data":"e7293a17954a08d62291ccd100f45a4b56c2fa3a17c799f78da4debe1990d51e"}
Dec 09 17:22:27 crc kubenswrapper[4840]: I1209 17:22:27.709761 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0f054c36-b41d-4ef8-8d86-1a9ef134dba0","Type":"ContainerStarted","Data":"ad0439ae373a132166543cea32b55ee446c8ecb214f7a28abb201165fe91d5a1"}
Dec 09 17:22:27 crc kubenswrapper[4840]: I1209 17:22:27.710318 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Dec 09 17:22:27 crc kubenswrapper[4840]: I1209 17:22:27.735846 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.735824355 podStartE2EDuration="36.735824355s" podCreationTimestamp="2025-12-09 17:21:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:22:27.734587659 +0000 UTC m=+1533.725698292" watchObservedRunningTime="2025-12-09 17:22:27.735824355 +0000 UTC m=+1533.726934998"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.318742 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"]
Dec 09 17:22:28 crc kubenswrapper[4840]: E1209 17:22:28.319570 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="init"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319594 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="init"
Dec 09 17:22:28 crc kubenswrapper[4840]: E1209 17:22:28.319619 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319627 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: E1209 17:22:28.319651 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="init"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319659 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="init"
Dec 09 17:22:28 crc kubenswrapper[4840]: E1209 17:22:28.319676 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319683 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319916 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="837fb506-c332-4b96-bb0a-95a008bc2016" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.319946 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="dda2a3eb-e28d-4c69-91a8-195dbce4bc85" containerName="dnsmasq-dns"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.330214 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.333371 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.333631 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.333749 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.333885 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.334080 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"]
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.415346 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.415485 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh9km\" (UniqueName: \"kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.415539 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.415573 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.532472 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.532670 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.533026 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh9km\" (UniqueName: \"kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.533797 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.541703 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.549874 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.550610 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.556618 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh9km\" (UniqueName: \"kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:28 crc kubenswrapper[4840]: I1209 17:22:28.658779 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:29 crc kubenswrapper[4840]: I1209 17:22:29.248644 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"]
Dec 09 17:22:29 crc kubenswrapper[4840]: I1209 17:22:29.734373 4840 generic.go:334] "Generic (PLEG): container finished" podID="745eab59-21fe-492c-8a51-5f557f1802e3" containerID="9a739f5d6a2f7b90a45cf3640d3e441aa5e9fd5ef6e2d6703beca81147e5e219" exitCode=0
Dec 09 17:22:29 crc kubenswrapper[4840]: I1209 17:22:29.734481 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"745eab59-21fe-492c-8a51-5f557f1802e3","Type":"ContainerDied","Data":"9a739f5d6a2f7b90a45cf3640d3e441aa5e9fd5ef6e2d6703beca81147e5e219"}
Dec 09 17:22:29 crc kubenswrapper[4840]: I1209 17:22:29.737004 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g" event={"ID":"afb99ad0-9b95-4cad-a689-01347c7013c1","Type":"ContainerStarted","Data":"e47f86e686e072a3e5546e1338c5c73e2b6ad0dee6911169d602f76f87168484"}
Dec 09 17:22:30 crc kubenswrapper[4840]: I1209 17:22:30.752707 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"745eab59-21fe-492c-8a51-5f557f1802e3","Type":"ContainerStarted","Data":"cc57e9d4eb991a2010df822f974c1f7ebd4ee34519f0f4cdc81c622b0a96a06d"}
Dec 09 17:22:30 crc kubenswrapper[4840]: I1209 17:22:30.753344 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Dec 09 17:22:30 crc kubenswrapper[4840]: I1209 17:22:30.785694 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.785674688 podStartE2EDuration="37.785674688s" podCreationTimestamp="2025-12-09 17:21:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:22:30.775787887 +0000 UTC m=+1536.766898530" watchObservedRunningTime="2025-12-09 17:22:30.785674688 +0000 UTC m=+1536.776785321"
Dec 09 17:22:31 crc kubenswrapper[4840]: E1209 17:22:31.612787 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:22:33 crc kubenswrapper[4840]: E1209 17:22:33.740133 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:22:33 crc kubenswrapper[4840]: E1209 17:22:33.740812 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:22:33 crc kubenswrapper[4840]: E1209 17:22:33.741028 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:22:33 crc kubenswrapper[4840]: E1209 17:22:33.742294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:22:34 crc kubenswrapper[4840]: I1209 17:22:34.036082 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:22:34 crc kubenswrapper[4840]: I1209 17:22:34.036147 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:22:34 crc kubenswrapper[4840]: I1209 17:22:34.036200 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 17:22:34 crc kubenswrapper[4840]: I1209 17:22:34.036922 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 17:22:34 crc kubenswrapper[4840]: I1209 17:22:34.037002 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" gracePeriod=600
Dec 09 17:22:35 crc kubenswrapper[4840]: I1209 17:22:35.812913 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" exitCode=0
Dec 09 17:22:35 crc kubenswrapper[4840]: I1209 17:22:35.812995 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"}
Dec 09 17:22:35 crc kubenswrapper[4840]: I1209 17:22:35.813230 4840 scope.go:117] "RemoveContainer" containerID="05e98f63a75f1d00a4b05aafffb49ac3d5f6082b4645459897faa5f48fc3ff01"
Dec 09 17:22:41 crc kubenswrapper[4840]: I1209 17:22:41.890161 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Dec 09 17:22:42 crc kubenswrapper[4840]: E1209 17:22:42.033002 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:22:42 crc kubenswrapper[4840]: I1209 17:22:42.910372 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g" event={"ID":"afb99ad0-9b95-4cad-a689-01347c7013c1","Type":"ContainerStarted","Data":"bf932d2ba8f0f6f7968cecc574d591bf49b749f505f03a6caf42b085ae4d2a15"}
Dec 09 17:22:42 crc kubenswrapper[4840]: I1209 17:22:42.920279 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:22:42 crc kubenswrapper[4840]: E1209 17:22:42.920546 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:22:42 crc kubenswrapper[4840]: I1209 17:22:42.934423 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g" podStartSLOduration=2.059965111 podStartE2EDuration="14.934405633s" podCreationTimestamp="2025-12-09 17:22:28 +0000 UTC" firstStartedPulling="2025-12-09 17:22:29.247982869 +0000 UTC m=+1535.239093502" lastFinishedPulling="2025-12-09 17:22:42.122423391 +0000 UTC m=+1548.113534024" observedRunningTime="2025-12-09 17:22:42.93146679 +0000 UTC m=+1548.922577433" watchObservedRunningTime="2025-12-09 17:22:42.934405633 +0000 UTC m=+1548.925516266"
Dec 09 17:22:43 crc kubenswrapper[4840]: I1209 17:22:43.715150 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Dec 09 17:22:44 crc kubenswrapper[4840]: E1209 17:22:44.641380 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:22:45 crc kubenswrapper[4840]: E1209 17:22:45.610313 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:22:54 crc kubenswrapper[4840]: I1209 17:22:54.032346 4840 generic.go:334] "Generic (PLEG): container finished" podID="afb99ad0-9b95-4cad-a689-01347c7013c1" containerID="bf932d2ba8f0f6f7968cecc574d591bf49b749f505f03a6caf42b085ae4d2a15" exitCode=0
Dec 09 17:22:54 crc kubenswrapper[4840]: I1209 17:22:54.032466 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g" event={"ID":"afb99ad0-9b95-4cad-a689-01347c7013c1","Type":"ContainerDied","Data":"bf932d2ba8f0f6f7968cecc574d591bf49b749f505f03a6caf42b085ae4d2a15"}
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.579870 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.705955 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory\") pod \"afb99ad0-9b95-4cad-a689-01347c7013c1\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") "
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.706055 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle\") pod \"afb99ad0-9b95-4cad-a689-01347c7013c1\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") "
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.706181 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh9km\" (UniqueName: \"kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km\") pod \"afb99ad0-9b95-4cad-a689-01347c7013c1\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") "
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.706480 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key\") pod \"afb99ad0-9b95-4cad-a689-01347c7013c1\" (UID: \"afb99ad0-9b95-4cad-a689-01347c7013c1\") "
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.711811 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "afb99ad0-9b95-4cad-a689-01347c7013c1" (UID: "afb99ad0-9b95-4cad-a689-01347c7013c1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.713117 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km" (OuterVolumeSpecName: "kube-api-access-mh9km") pod "afb99ad0-9b95-4cad-a689-01347c7013c1" (UID: "afb99ad0-9b95-4cad-a689-01347c7013c1"). InnerVolumeSpecName "kube-api-access-mh9km". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.742703 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory" (OuterVolumeSpecName: "inventory") pod "afb99ad0-9b95-4cad-a689-01347c7013c1" (UID: "afb99ad0-9b95-4cad-a689-01347c7013c1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.743570 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "afb99ad0-9b95-4cad-a689-01347c7013c1" (UID: "afb99ad0-9b95-4cad-a689-01347c7013c1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.811248 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.811295 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.811308 4840 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afb99ad0-9b95-4cad-a689-01347c7013c1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 17:22:55 crc kubenswrapper[4840]: I1209 17:22:55.811319 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh9km\" (UniqueName: \"kubernetes.io/projected/afb99ad0-9b95-4cad-a689-01347c7013c1-kube-api-access-mh9km\") on node \"crc\" DevicePath \"\""
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.057112 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g" event={"ID":"afb99ad0-9b95-4cad-a689-01347c7013c1","Type":"ContainerDied","Data":"e47f86e686e072a3e5546e1338c5c73e2b6ad0dee6911169d602f76f87168484"}
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.057145 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e47f86e686e072a3e5546e1338c5c73e2b6ad0dee6911169d602f76f87168484"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.057196 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.148449 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"]
Dec 09 17:22:56 crc kubenswrapper[4840]: E1209 17:22:56.149243 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afb99ad0-9b95-4cad-a689-01347c7013c1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.149352 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="afb99ad0-9b95-4cad-a689-01347c7013c1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.149599 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="afb99ad0-9b95-4cad-a689-01347c7013c1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.150432 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.152508 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.152819 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.152942 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.152825 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.159513 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"]
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.218198 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff66q\" (UniqueName: \"kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.218370 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.218407 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.320228 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.320324 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.321307 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff66q\" (UniqueName: \"kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.325126 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.335758 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.338243 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff66q\" (UniqueName: \"kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4fr4v\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.528041 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:22:56 crc kubenswrapper[4840]: I1209 17:22:56.609368 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:22:56 crc kubenswrapper[4840]: E1209 17:22:56.609712 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:22:57 crc kubenswrapper[4840]: I1209 17:22:57.088441 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"]
Dec 09 17:22:57 crc kubenswrapper[4840]: E1209 17:22:57.609767 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:22:58 crc kubenswrapper[4840]: I1209 17:22:58.078387 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v" event={"ID":"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91","Type":"ContainerStarted","Data":"d3288b27732a93aafde357c306c9cba33bd666d1220a6954e2c285a599eef66c"}
Dec 09 17:22:58 crc kubenswrapper[4840]: I1209 17:22:58.078734 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v" event={"ID":"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91","Type":"ContainerStarted","Data":"8c958521e805314d111229140131369673e85f26d9030e18139ba1bda483fb6e"}
Dec 09 17:22:58 crc kubenswrapper[4840]: I1209 17:22:58.103307 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v" podStartSLOduration=1.443891654 podStartE2EDuration="2.103286119s" podCreationTimestamp="2025-12-09 17:22:56 +0000 UTC" firstStartedPulling="2025-12-09 17:22:57.094804442 +0000 UTC m=+1563.085915075" lastFinishedPulling="2025-12-09 17:22:57.754198897 +0000 UTC m=+1563.745309540" observedRunningTime="2025-12-09 17:22:58.096158977 +0000 UTC m=+1564.087269610" watchObservedRunningTime="2025-12-09 17:22:58.103286119 +0000 UTC m=+1564.094396752"
Dec 09 17:22:59 crc kubenswrapper[4840]: E1209 17:22:59.610416 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:23:01 crc kubenswrapper[4840]: I1209 17:23:01.115169 4840 generic.go:334] "Generic (PLEG): container finished" podID="c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" containerID="d3288b27732a93aafde357c306c9cba33bd666d1220a6954e2c285a599eef66c" exitCode=0
Dec 09 17:23:01 crc kubenswrapper[4840]: I1209 17:23:01.115310 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v" event={"ID":"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91","Type":"ContainerDied","Data":"d3288b27732a93aafde357c306c9cba33bd666d1220a6954e2c285a599eef66c"}
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.782653 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.855779 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ff66q\" (UniqueName: \"kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q\") pod \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") "
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.855839 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key\") pod \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") "
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.855956 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory\") pod \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\" (UID: \"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91\") "
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.861750 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q" (OuterVolumeSpecName: "kube-api-access-ff66q") pod "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" (UID: "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91"). InnerVolumeSpecName "kube-api-access-ff66q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.889851 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory" (OuterVolumeSpecName: "inventory") pod "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" (UID: "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.894724 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" (UID: "c9a73b5a-5966-4c16-9d5c-7ad9765e4b91"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.958701 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.958735 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ff66q\" (UniqueName: \"kubernetes.io/projected/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-kube-api-access-ff66q\") on node \"crc\" DevicePath \"\""
Dec 09 17:23:02 crc kubenswrapper[4840]: I1209 17:23:02.958747 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9a73b5a-5966-4c16-9d5c-7ad9765e4b91-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.138502 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v" event={"ID":"c9a73b5a-5966-4c16-9d5c-7ad9765e4b91","Type":"ContainerDied","Data":"8c958521e805314d111229140131369673e85f26d9030e18139ba1bda483fb6e"}
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.138548 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c958521e805314d111229140131369673e85f26d9030e18139ba1bda483fb6e"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.138576 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4fr4v"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.224796 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"]
Dec 09 17:23:03 crc kubenswrapper[4840]: E1209 17:23:03.225310 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.225333 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.225619 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9a73b5a-5966-4c16-9d5c-7ad9765e4b91" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.226541 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.230179 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.230329 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.230179 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.230372 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.254167 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"]
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.265098 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.265493 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttdql\" (UniqueName: \"kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.265664 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.267679 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.369718 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttdql\" (UniqueName: \"kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.370049 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.370286 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.370547 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.375848 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.376068 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.377805 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.391287 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttdql\" (UniqueName: \"kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:03 crc kubenswrapper[4840]: I1209 17:23:03.559024 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"
Dec 09 17:23:04 crc kubenswrapper[4840]: W1209 17:23:04.099019 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07769bef_a8d2_452e_af4d_c33e9c99da4b.slice/crio-79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba WatchSource:0}: Error finding container 79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba: Status 404 returned error can't find the container with id 79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba
Dec 09 17:23:04 crc kubenswrapper[4840]: I1209 17:23:04.110617 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn"]
Dec 09 17:23:04 crc kubenswrapper[4840]: I1209 17:23:04.148606 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" event={"ID":"07769bef-a8d2-452e-af4d-c33e9c99da4b","Type":"ContainerStarted","Data":"79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba"}
Dec 09 17:23:06 crc kubenswrapper[4840]: I1209 17:23:06.173888 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" event={"ID":"07769bef-a8d2-452e-af4d-c33e9c99da4b","Type":"ContainerStarted","Data":"9f9d9cc22efa1c07a220c156154dbad24295a02e0cba2744925f6eaead20d527"}
Dec 09 17:23:06 crc kubenswrapper[4840]: I1209 17:23:06.202201 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" podStartSLOduration=2.369066422 podStartE2EDuration="3.202174305s" podCreationTimestamp="2025-12-09 17:23:03 +0000 UTC" firstStartedPulling="2025-12-09 17:23:04.103169624 +0000 UTC m=+1570.094280257" lastFinishedPulling="2025-12-09 17:23:04.936277507 +0000 UTC m=+1570.927388140" observedRunningTime="2025-12-09 17:23:06.199466668 +0000 UTC m=+1572.190577311" watchObservedRunningTime="2025-12-09 17:23:06.202174305 +0000 UTC m=+1572.193284948"
Dec 09 17:23:07 crc kubenswrapper[4840]: I1209 17:23:07.608490 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:23:07 crc kubenswrapper[4840]: E1209 17:23:07.609083 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:23:09 crc kubenswrapper[4840]: I1209 17:23:09.002802 4840 scope.go:117] "RemoveContainer" containerID="00adfa2e3245da69d9392e24a34b9a3e60188a038c6325fa7e0dcd165b2ecf83"
Dec 09 17:23:09 crc kubenswrapper[4840]: I1209 17:23:09.041678 4840 scope.go:117] "RemoveContainer" containerID="5244fe0fbc7b7d1f4ccadb212c2501526ed510a64f3173064b1051ad0057d3ec"
Dec 09 17:23:09 crc kubenswrapper[4840]: I1209 17:23:09.103043 4840 scope.go:117] "RemoveContainer" containerID="186cb344f06d15427e74197e22e6db35af1f6bca882d7001d746a9cfabd7d7d0"
Dec 09 17:23:10 crc kubenswrapper[4840]: E1209 17:23:10.730402 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:23:10 crc kubenswrapper[4840]: E1209 17:23:10.732201 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:23:10 crc kubenswrapper[4840]: E1209 17:23:10.732581 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:23:10 crc kubenswrapper[4840]: E1209 17:23:10.734089 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:23:11 crc kubenswrapper[4840]: E1209 17:23:11.610919 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:23:20 crc kubenswrapper[4840]: I1209 17:23:20.609551 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:23:20 crc kubenswrapper[4840]: E1209 17:23:20.610564 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:23:24 crc kubenswrapper[4840]: E1209 17:23:24.624379 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:23:25 crc kubenswrapper[4840]: E1209 17:23:25.711001 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:23:25 crc kubenswrapper[4840]: E1209 17:23:25.711086 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:23:25 crc kubenswrapper[4840]: E1209 17:23:25.711247 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:23:25 crc kubenswrapper[4840]: E1209 17:23:25.712491 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:23:34 crc kubenswrapper[4840]: I1209 17:23:34.617134 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:23:34 crc kubenswrapper[4840]: E1209 17:23:34.618748 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:23:35 crc kubenswrapper[4840]: E1209 17:23:35.610271 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.892042 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-shtcd"]
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.895419 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.914772 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-shtcd"]
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.997114 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.997402 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:38 crc kubenswrapper[4840]: I1209 17:23:38.997461 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w774r\" (UniqueName: \"kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.099410 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.099478 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w774r\" (UniqueName: \"kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.099523 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.100209 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.100213 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.122080 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w774r\" (UniqueName: \"kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r\") pod \"community-operators-shtcd\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " pod="openshift-marketplace/community-operators-shtcd"
Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.221129 4840 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:39 crc kubenswrapper[4840]: I1209 17:23:39.801689 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-shtcd"] Dec 09 17:23:40 crc kubenswrapper[4840]: I1209 17:23:40.556010 4840 generic.go:334] "Generic (PLEG): container finished" podID="57304ae0-5067-4763-9c5a-c0f87f485677" containerID="ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03" exitCode=0 Dec 09 17:23:40 crc kubenswrapper[4840]: I1209 17:23:40.556119 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerDied","Data":"ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03"} Dec 09 17:23:40 crc kubenswrapper[4840]: I1209 17:23:40.556262 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerStarted","Data":"89b2e7771f957a6602f00f67cae2ae5686abfc925f34a72525158ce917aebf14"} Dec 09 17:23:40 crc kubenswrapper[4840]: I1209 17:23:40.558761 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:23:40 crc kubenswrapper[4840]: E1209 17:23:40.611078 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:23:42 crc kubenswrapper[4840]: I1209 17:23:42.586248 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerStarted","Data":"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c"} Dec 09 17:23:43 crc kubenswrapper[4840]: I1209 17:23:43.598783 4840 generic.go:334] "Generic (PLEG): container finished" podID="57304ae0-5067-4763-9c5a-c0f87f485677" containerID="61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c" exitCode=0 Dec 09 17:23:43 crc kubenswrapper[4840]: I1209 17:23:43.598860 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerDied","Data":"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c"} Dec 09 17:23:44 crc kubenswrapper[4840]: I1209 17:23:44.629992 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerStarted","Data":"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d"} Dec 09 17:23:44 crc kubenswrapper[4840]: I1209 17:23:44.647434 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-shtcd" podStartSLOduration=3.09854508 podStartE2EDuration="6.647411988s" podCreationTimestamp="2025-12-09 17:23:38 +0000 UTC" firstStartedPulling="2025-12-09 17:23:40.558507621 +0000 UTC m=+1606.549618254" lastFinishedPulling="2025-12-09 17:23:44.107374529 +0000 UTC m=+1610.098485162" observedRunningTime="2025-12-09 17:23:44.644815625 +0000 UTC m=+1610.635926278" watchObservedRunningTime="2025-12-09 
17:23:44.647411988 +0000 UTC m=+1610.638522641" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.593316 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.595982 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.607372 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.608385 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:23:47 crc kubenswrapper[4840]: E1209 17:23:47.608828 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.704749 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.704868 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn4jv\" (UniqueName: \"kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.705043 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.807200 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn4jv\" (UniqueName: \"kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.807320 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.807419 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content\") pod 
\"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.807875 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.807905 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.828723 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn4jv\" (UniqueName: \"kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv\") pod \"redhat-marketplace-8dw82\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:47 crc kubenswrapper[4840]: I1209 17:23:47.927582 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:48 crc kubenswrapper[4840]: I1209 17:23:48.444631 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:23:48 crc kubenswrapper[4840]: E1209 17:23:48.610516 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:23:48 crc kubenswrapper[4840]: I1209 17:23:48.684827 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerStarted","Data":"3ff719bbb43e2f9f37f36cf3522c9009971a7d1f1b1010eb211300f3a18b8fa3"} Dec 09 17:23:49 crc kubenswrapper[4840]: I1209 17:23:49.221772 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:49 crc kubenswrapper[4840]: I1209 17:23:49.221864 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:49 crc kubenswrapper[4840]: I1209 17:23:49.306177 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:49 crc kubenswrapper[4840]: I1209 17:23:49.699396 4840 generic.go:334] "Generic (PLEG): container finished" podID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerID="f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d" exitCode=0 Dec 09 17:23:49 crc kubenswrapper[4840]: I1209 17:23:49.700389 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerDied","Data":"f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d"} Dec 09 17:23:49 
crc kubenswrapper[4840]: I1209 17:23:49.759944 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:50 crc kubenswrapper[4840]: I1209 17:23:50.711434 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerStarted","Data":"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777"} Dec 09 17:23:51 crc kubenswrapper[4840]: I1209 17:23:51.573204 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-shtcd"] Dec 09 17:23:51 crc kubenswrapper[4840]: I1209 17:23:51.727655 4840 generic.go:334] "Generic (PLEG): container finished" podID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerID="cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777" exitCode=0 Dec 09 17:23:51 crc kubenswrapper[4840]: I1209 17:23:51.727705 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerDied","Data":"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777"} Dec 09 17:23:51 crc kubenswrapper[4840]: I1209 17:23:51.728360 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-shtcd" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="registry-server" containerID="cri-o://1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d" gracePeriod=2 Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.239827 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.305185 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities\") pod \"57304ae0-5067-4763-9c5a-c0f87f485677\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.305559 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content\") pod \"57304ae0-5067-4763-9c5a-c0f87f485677\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.305733 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w774r\" (UniqueName: \"kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r\") pod \"57304ae0-5067-4763-9c5a-c0f87f485677\" (UID: \"57304ae0-5067-4763-9c5a-c0f87f485677\") " Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.305899 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities" (OuterVolumeSpecName: "utilities") pod "57304ae0-5067-4763-9c5a-c0f87f485677" (UID: "57304ae0-5067-4763-9c5a-c0f87f485677"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.306413 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.339357 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r" (OuterVolumeSpecName: "kube-api-access-w774r") pod "57304ae0-5067-4763-9c5a-c0f87f485677" (UID: "57304ae0-5067-4763-9c5a-c0f87f485677"). InnerVolumeSpecName "kube-api-access-w774r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.383591 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57304ae0-5067-4763-9c5a-c0f87f485677" (UID: "57304ae0-5067-4763-9c5a-c0f87f485677"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.408220 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57304ae0-5067-4763-9c5a-c0f87f485677-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.408251 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w774r\" (UniqueName: \"kubernetes.io/projected/57304ae0-5067-4763-9c5a-c0f87f485677-kube-api-access-w774r\") on node \"crc\" DevicePath \"\"" Dec 09 17:23:52 crc kubenswrapper[4840]: E1209 17:23:52.610146 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.750071 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerStarted","Data":"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480"} Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.752880 4840 generic.go:334] "Generic (PLEG): container finished" podID="57304ae0-5067-4763-9c5a-c0f87f485677" containerID="1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d" exitCode=0 Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.752924 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerDied","Data":"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d"} Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.752951 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-shtcd" event={"ID":"57304ae0-5067-4763-9c5a-c0f87f485677","Type":"ContainerDied","Data":"89b2e7771f957a6602f00f67cae2ae5686abfc925f34a72525158ce917aebf14"} Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.752988 4840 scope.go:117] "RemoveContainer" 
containerID="1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.753108 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-shtcd" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.772438 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8dw82" podStartSLOduration=3.021119595 podStartE2EDuration="5.772421254s" podCreationTimestamp="2025-12-09 17:23:47 +0000 UTC" firstStartedPulling="2025-12-09 17:23:49.702722468 +0000 UTC m=+1615.693833101" lastFinishedPulling="2025-12-09 17:23:52.454024127 +0000 UTC m=+1618.445134760" observedRunningTime="2025-12-09 17:23:52.766930426 +0000 UTC m=+1618.758041059" watchObservedRunningTime="2025-12-09 17:23:52.772421254 +0000 UTC m=+1618.763531887" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.794745 4840 scope.go:117] "RemoveContainer" containerID="61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.806035 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-shtcd"] Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.845050 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-shtcd"] Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.848311 4840 scope.go:117] "RemoveContainer" containerID="ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.905114 4840 scope.go:117] "RemoveContainer" containerID="1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d" Dec 09 17:23:52 crc kubenswrapper[4840]: E1209 17:23:52.905559 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d\": container with ID starting with 1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d not found: ID does not exist" containerID="1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.905585 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d"} err="failed to get container status \"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d\": rpc error: code = NotFound desc = could not find container \"1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d\": container with ID starting with 1b58a87b10a7c5039d875469bc8525f49d31a4d874d3e0096f198acd2b62f87d not found: ID does not exist" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.905608 4840 scope.go:117] "RemoveContainer" containerID="61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c" Dec 09 17:23:52 crc kubenswrapper[4840]: E1209 17:23:52.905927 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c\": container with ID starting with 61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c not found: ID does not exist" containerID="61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 
17:23:52.905948 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c"} err="failed to get container status \"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c\": rpc error: code = NotFound desc = could not find container \"61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c\": container with ID starting with 61c14ef8ca656f7b9459eddc1d3b4133555eab21928cb6bf65fb39ccf33aef9c not found: ID does not exist" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.906126 4840 scope.go:117] "RemoveContainer" containerID="ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03" Dec 09 17:23:52 crc kubenswrapper[4840]: E1209 17:23:52.906361 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03\": container with ID starting with ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03 not found: ID does not exist" containerID="ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03" Dec 09 17:23:52 crc kubenswrapper[4840]: I1209 17:23:52.906384 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03"} err="failed to get container status \"ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03\": rpc error: code = NotFound desc = could not find container \"ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03\": container with ID starting with ef6338e847c6b08879f9df83068f8308aee3c9d6cd17e29c082d8af16015cc03 not found: ID does not exist" Dec 09 17:23:54 crc kubenswrapper[4840]: I1209 17:23:54.626924 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" path="/var/lib/kubelet/pods/57304ae0-5067-4763-9c5a-c0f87f485677/volumes" Dec 09 17:23:57 crc kubenswrapper[4840]: I1209 17:23:57.928729 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:57 crc kubenswrapper[4840]: I1209 17:23:57.929326 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:57 crc kubenswrapper[4840]: I1209 17:23:57.980922 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:58 crc kubenswrapper[4840]: I1209 17:23:58.892108 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:23:58 crc kubenswrapper[4840]: I1209 17:23:58.947154 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:23:59 crc kubenswrapper[4840]: I1209 17:23:59.609402 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:23:59 crc kubenswrapper[4840]: E1209 17:23:59.610129 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:24:00 crc kubenswrapper[4840]: I1209 17:24:00.845096 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8dw82" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="registry-server" containerID="cri-o://974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480" gracePeriod=2 Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.521622 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:24:01 crc kubenswrapper[4840]: E1209 17:24:01.610908 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.691461 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn4jv\" (UniqueName: \"kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv\") pod \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.691554 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities\") pod \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.691675 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content\") pod \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\" (UID: \"3751a56c-df7c-4c23-9950-29fe5bf6dc45\") " Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.692642 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities" (OuterVolumeSpecName: "utilities") pod "3751a56c-df7c-4c23-9950-29fe5bf6dc45" (UID: "3751a56c-df7c-4c23-9950-29fe5bf6dc45"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.697236 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv" (OuterVolumeSpecName: "kube-api-access-nn4jv") pod "3751a56c-df7c-4c23-9950-29fe5bf6dc45" (UID: "3751a56c-df7c-4c23-9950-29fe5bf6dc45"). InnerVolumeSpecName "kube-api-access-nn4jv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.713046 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3751a56c-df7c-4c23-9950-29fe5bf6dc45" (UID: "3751a56c-df7c-4c23-9950-29fe5bf6dc45"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.795391 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.795457 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3751a56c-df7c-4c23-9950-29fe5bf6dc45-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.795476 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn4jv\" (UniqueName: \"kubernetes.io/projected/3751a56c-df7c-4c23-9950-29fe5bf6dc45-kube-api-access-nn4jv\") on node \"crc\" DevicePath \"\"" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.854951 4840 generic.go:334] "Generic (PLEG): container finished" podID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerID="974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480" exitCode=0 Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.854993 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerDied","Data":"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480"} Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.855039 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8dw82" event={"ID":"3751a56c-df7c-4c23-9950-29fe5bf6dc45","Type":"ContainerDied","Data":"3ff719bbb43e2f9f37f36cf3522c9009971a7d1f1b1010eb211300f3a18b8fa3"} Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.855060 4840 scope.go:117] "RemoveContainer" containerID="974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.855004 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8dw82" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.887227 4840 scope.go:117] "RemoveContainer" containerID="cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.892184 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.906391 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8dw82"] Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.909882 4840 scope.go:117] "RemoveContainer" containerID="f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.963798 4840 scope.go:117] "RemoveContainer" containerID="974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480" Dec 09 17:24:01 crc kubenswrapper[4840]: E1209 17:24:01.964452 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480\": container with ID starting with 974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480 not found: ID does not exist" containerID="974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.964502 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480"} err="failed to get container status \"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480\": rpc error: code = NotFound desc = could not find container \"974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480\": container with ID starting with 974a4ce9f544524ef2093cea5275d7290cc503945583567cfff3bc5d16de7480 not found: ID does not exist" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.964539 4840 scope.go:117] "RemoveContainer" containerID="cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777" Dec 09 17:24:01 crc kubenswrapper[4840]: E1209 17:24:01.964917 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777\": container with ID starting with cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777 not found: ID does not exist" containerID="cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.964941 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777"} err="failed to get container status \"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777\": rpc error: code = NotFound desc = could not find container \"cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777\": container with ID starting with cf2755e6b01a9dc2d704a2d14102442e8283c37b142368a9ea4c3870e5b1e777 not found: ID does not exist" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.964957 4840 scope.go:117] "RemoveContainer" containerID="f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d" Dec 09 17:24:01 crc kubenswrapper[4840]: E1209 17:24:01.965509 4840 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d\": container with ID starting with f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d not found: ID does not exist" containerID="f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d" Dec 09 17:24:01 crc kubenswrapper[4840]: I1209 17:24:01.965574 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d"} err="failed to get container status \"f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d\": rpc error: code = NotFound desc = could not find container \"f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d\": container with ID starting with f8ab17189f5f4a2ad365a2fea6ce4a9eb8eeb77adb76760ffafb17fd5c0e041d not found: ID does not exist" Dec 09 17:24:02 crc kubenswrapper[4840]: I1209 17:24:02.625167 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" path="/var/lib/kubelet/pods/3751a56c-df7c-4c23-9950-29fe5bf6dc45/volumes" Dec 09 17:24:03 crc kubenswrapper[4840]: E1209 17:24:03.612281 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:24:09 crc kubenswrapper[4840]: I1209 17:24:09.270545 4840 scope.go:117] "RemoveContainer" containerID="1bcf87ff53df7a1fcef6a8c1425b5d1044fa761ab24bf2f2c82cd5d739dd0fcf" Dec 09 17:24:09 crc kubenswrapper[4840]: I1209 17:24:09.323854 4840 scope.go:117] "RemoveContainer" containerID="31b7de18858821baab09999cca70834aa04404fc36fd5f265f6740bf8d02f980" Dec 09 17:24:14 crc kubenswrapper[4840]: I1209 17:24:14.618250 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:24:14 crc kubenswrapper[4840]: E1209 17:24:14.619100 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:24:16 crc kubenswrapper[4840]: E1209 17:24:16.611293 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:24:17 crc kubenswrapper[4840]: E1209 17:24:17.610296 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:24:28 crc kubenswrapper[4840]: I1209 17:24:28.608706 4840 scope.go:117] "RemoveContainer" 
containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:24:28 crc kubenswrapper[4840]: E1209 17:24:28.609434 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:24:28 crc kubenswrapper[4840]: E1209 17:24:28.610818 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:24:30 crc kubenswrapper[4840]: E1209 17:24:30.612567 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:24:39 crc kubenswrapper[4840]: I1209 17:24:39.608622 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:24:39 crc kubenswrapper[4840]: E1209 17:24:39.609278 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:24:39 crc kubenswrapper[4840]: E1209 17:24:39.612598 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:24:43 crc kubenswrapper[4840]: E1209 17:24:43.748005 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:24:43 crc kubenswrapper[4840]: E1209 17:24:43.749464 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:24:43 crc kubenswrapper[4840]: E1209 17:24:43.749638 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:24:43 crc kubenswrapper[4840]: E1209 17:24:43.750997 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:24:53 crc kubenswrapper[4840]: I1209 17:24:53.609375 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:24:53 crc kubenswrapper[4840]: E1209 17:24:53.610033 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:24:53 crc kubenswrapper[4840]: E1209 17:24:53.735062 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:24:53 crc kubenswrapper[4840]: E1209 17:24:53.735148 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:24:53 crc kubenswrapper[4840]: E1209 17:24:53.735337 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:24:53 crc kubenswrapper[4840]: E1209 17:24:53.736821 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:24:56 crc kubenswrapper[4840]: E1209 17:24:56.610578 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:25:05 crc kubenswrapper[4840]: I1209 17:25:05.608155 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:25:05 crc kubenswrapper[4840]: E1209 17:25:05.608804 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:25:07 crc kubenswrapper[4840]: E1209 17:25:07.611333 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:25:08 crc kubenswrapper[4840]: E1209 17:25:08.611461 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:25:17 crc kubenswrapper[4840]: I1209 17:25:17.608563 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:25:17 crc kubenswrapper[4840]: E1209 17:25:17.609363 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:25:20 crc kubenswrapper[4840]: E1209 17:25:20.610927 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:25:22 crc kubenswrapper[4840]: E1209 17:25:22.610252 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:25:28 crc kubenswrapper[4840]: I1209 
Dec 09 17:25:28 crc kubenswrapper[4840]: I1209 17:25:28.610091 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:25:28 crc kubenswrapper[4840]: E1209 17:25:28.611362 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:25:32 crc kubenswrapper[4840]: E1209 17:25:32.610353 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:25:37 crc kubenswrapper[4840]: E1209 17:25:37.610539 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:25:41 crc kubenswrapper[4840]: I1209 17:25:41.609241 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:25:41 crc kubenswrapper[4840]: E1209 17:25:41.609932 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:25:47 crc kubenswrapper[4840]: E1209 17:25:47.611769 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:25:51 crc kubenswrapper[4840]: E1209 17:25:51.610753 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:25:53 crc kubenswrapper[4840]: I1209 17:25:53.608806 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:25:53 crc kubenswrapper[4840]: E1209 17:25:53.609251 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\""
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:26:00 crc kubenswrapper[4840]: E1209 17:26:00.611047 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:26:03 crc kubenswrapper[4840]: E1209 17:26:03.610257 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:26:04 crc kubenswrapper[4840]: I1209 17:26:04.615252 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:26:04 crc kubenswrapper[4840]: E1209 17:26:04.615502 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:26:15 crc kubenswrapper[4840]: I1209 17:26:15.608405 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:26:15 crc kubenswrapper[4840]: E1209 17:26:15.609290 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:26:15 crc kubenswrapper[4840]: E1209 17:26:15.611180 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:26:17 crc kubenswrapper[4840]: E1209 17:26:17.611000 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:26:27 crc kubenswrapper[4840]: E1209 17:26:27.611279 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:26:28 crc kubenswrapper[4840]: 
I1209 17:26:28.611314 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:26:28 crc kubenswrapper[4840]: E1209 17:26:28.612034 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:26:28 crc kubenswrapper[4840]: I1209 17:26:28.973871 4840 generic.go:334] "Generic (PLEG): container finished" podID="07769bef-a8d2-452e-af4d-c33e9c99da4b" containerID="9f9d9cc22efa1c07a220c156154dbad24295a02e0cba2744925f6eaead20d527" exitCode=0 Dec 09 17:26:28 crc kubenswrapper[4840]: I1209 17:26:28.973939 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" event={"ID":"07769bef-a8d2-452e-af4d-c33e9c99da4b","Type":"ContainerDied","Data":"9f9d9cc22efa1c07a220c156154dbad24295a02e0cba2744925f6eaead20d527"} Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.583406 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.757502 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle\") pod \"07769bef-a8d2-452e-af4d-c33e9c99da4b\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.757606 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttdql\" (UniqueName: \"kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql\") pod \"07769bef-a8d2-452e-af4d-c33e9c99da4b\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.757660 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key\") pod \"07769bef-a8d2-452e-af4d-c33e9c99da4b\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.757708 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory\") pod \"07769bef-a8d2-452e-af4d-c33e9c99da4b\" (UID: \"07769bef-a8d2-452e-af4d-c33e9c99da4b\") " Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.764944 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql" (OuterVolumeSpecName: "kube-api-access-ttdql") pod "07769bef-a8d2-452e-af4d-c33e9c99da4b" (UID: "07769bef-a8d2-452e-af4d-c33e9c99da4b"). InnerVolumeSpecName "kube-api-access-ttdql". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.766337 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "07769bef-a8d2-452e-af4d-c33e9c99da4b" (UID: "07769bef-a8d2-452e-af4d-c33e9c99da4b"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.789152 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "07769bef-a8d2-452e-af4d-c33e9c99da4b" (UID: "07769bef-a8d2-452e-af4d-c33e9c99da4b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.806238 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory" (OuterVolumeSpecName: "inventory") pod "07769bef-a8d2-452e-af4d-c33e9c99da4b" (UID: "07769bef-a8d2-452e-af4d-c33e9c99da4b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.861083 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.861123 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.861138 4840 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07769bef-a8d2-452e-af4d-c33e9c99da4b-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 17:26:30 crc kubenswrapper[4840]: I1209 17:26:30.861155 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttdql\" (UniqueName: \"kubernetes.io/projected/07769bef-a8d2-452e-af4d-c33e9c99da4b-kube-api-access-ttdql\") on node \"crc\" DevicePath \"\"" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.003019 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" event={"ID":"07769bef-a8d2-452e-af4d-c33e9c99da4b","Type":"ContainerDied","Data":"79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba"} Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.003082 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79e776cb787ca905851d8ae5c50413a6e4dd8c42fd9d3511191e097040e121ba" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.003137 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.122680 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7"] Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123685 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="extract-content" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123707 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="extract-content" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123720 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="extract-content" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123727 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="extract-content" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123754 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123761 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123787 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123793 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123801 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="extract-utilities" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123807 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="extract-utilities" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123820 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07769bef-a8d2-452e-af4d-c33e9c99da4b" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123826 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="07769bef-a8d2-452e-af4d-c33e9c99da4b" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 17:26:31 crc kubenswrapper[4840]: E1209 17:26:31.123850 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="extract-utilities" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.123855 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="extract-utilities" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.124071 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="3751a56c-df7c-4c23-9950-29fe5bf6dc45" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.124093 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="07769bef-a8d2-452e-af4d-c33e9c99da4b" 
containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.124104 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="57304ae0-5067-4763-9c5a-c0f87f485677" containerName="registry-server" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.124820 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.129651 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.129692 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.129885 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.129988 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.137700 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7"] Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.270625 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjdz9\" (UniqueName: \"kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.271323 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.271427 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.373835 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjdz9\" (UniqueName: \"kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.374117 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: 
\"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.374212 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.383726 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.387687 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.391148 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjdz9\" (UniqueName: \"kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.442658 4840 util.go:30] "No sandbox for pod can be found. 
Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.442658 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7"
Dec 09 17:26:31 crc kubenswrapper[4840]: I1209 17:26:31.958004 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7"]
Dec 09 17:26:32 crc kubenswrapper[4840]: I1209 17:26:32.012769 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" event={"ID":"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36","Type":"ContainerStarted","Data":"69f41ff711634da7b3abadd4e912add0ceb73b65b69b8be32d97c5ebabe24e5d"}
Dec 09 17:26:32 crc kubenswrapper[4840]: E1209 17:26:32.611118 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:26:34 crc kubenswrapper[4840]: I1209 17:26:34.041406 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" event={"ID":"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36","Type":"ContainerStarted","Data":"b21293e410947e7f6a5069aba5fc81c95fce88d2cbba708672b854b61bbc19ba"}
Dec 09 17:26:34 crc kubenswrapper[4840]: I1209 17:26:34.077777 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" podStartSLOduration=2.127404342 podStartE2EDuration="3.077760034s" podCreationTimestamp="2025-12-09 17:26:31 +0000 UTC" firstStartedPulling="2025-12-09 17:26:31.96018888 +0000 UTC m=+1777.951299513" lastFinishedPulling="2025-12-09 17:26:32.910544562 +0000 UTC m=+1778.901655205" observedRunningTime="2025-12-09 17:26:34.065886781 +0000 UTC m=+1780.056997434" watchObservedRunningTime="2025-12-09 17:26:34.077760034 +0000 UTC m=+1780.068870667"
Dec 09 17:26:35 crc kubenswrapper[4840]: I1209 17:26:35.052202 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-wzl7f"]
Dec 09 17:26:35 crc kubenswrapper[4840]: I1209 17:26:35.066488 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-lpswr"]
Dec 09 17:26:35 crc kubenswrapper[4840]: I1209 17:26:35.079172 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-wzl7f"]
Dec 09 17:26:35 crc kubenswrapper[4840]: I1209 17:26:35.088736 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-lpswr"]
Dec 09 17:26:36 crc kubenswrapper[4840]: I1209 17:26:36.623086 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4140b4de-95a1-4ecc-bcb7-13252484a4be" path="/var/lib/kubelet/pods/4140b4de-95a1-4ecc-bcb7-13252484a4be/volumes"
Dec 09 17:26:36 crc kubenswrapper[4840]: I1209 17:26:36.624487 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9f3b116-8f98-4b31-8bf2-71f2c9dca16b" path="/var/lib/kubelet/pods/c9f3b116-8f98-4b31-8bf2-71f2c9dca16b/volumes"
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.037843 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c44f-account-create-update-rmd7r"]
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.052505 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-6847-account-create-update-674bs"]
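The "Observed pod startup duration" record above can be checked by hand: podStartSLOduration appears to be the end-to-end startup duration minus the image-pull window, and the monotonic clock offsets (m=+...) that the tracker logs reproduce it exactly. A small Go verification (constants copied from the record; the subtraction rule is inferred from these numbers, not taken from kubelet source):

package main

import "fmt"

// Re-derives podStartSLOduration for download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7
// from the monotonic offsets (m=+...) in the tracker record above.
// Assumption: SLO duration = E2E startup duration minus image-pull time.
func main() {
	const (
		firstStartedPulling = 1777.951299513 // m=+ offsets, in seconds
		lastFinishedPulling = 1778.901655205
		podStartE2E         = 3.077760034 // podStartE2EDuration from the record
	)
	pullWindow := lastFinishedPulling - firstStartedPulling
	fmt.Printf("image pull window:   %.9fs\n", pullWindow)            // 0.950355692s
	fmt.Printf("podStartSLOduration: %.9f\n", podStartE2E-pullWindow) // 2.127404342
}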
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.065537 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-c44f-account-create-update-rmd7r"]
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.078848 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-6847-account-create-update-674bs"]
Dec 09 17:26:38 crc kubenswrapper[4840]: E1209 17:26:38.610532 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.623677 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="160479a3-c4ef-43bf-b98d-6c92fab32d26" path="/var/lib/kubelet/pods/160479a3-c4ef-43bf-b98d-6c92fab32d26/volumes"
Dec 09 17:26:38 crc kubenswrapper[4840]: I1209 17:26:38.625136 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d4e3a74-889a-44eb-ac49-4db6cea8bcd8" path="/var/lib/kubelet/pods/5d4e3a74-889a-44eb-ac49-4db6cea8bcd8/volumes"
Dec 09 17:26:39 crc kubenswrapper[4840]: I1209 17:26:39.034630 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5lvq6"]
Dec 09 17:26:39 crc kubenswrapper[4840]: I1209 17:26:39.048608 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-2bce-account-create-update-slw4q"]
Dec 09 17:26:39 crc kubenswrapper[4840]: I1209 17:26:39.061674 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-2bce-account-create-update-slw4q"]
Dec 09 17:26:39 crc kubenswrapper[4840]: I1209 17:26:39.074960 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5lvq6"]
Dec 09 17:26:40 crc kubenswrapper[4840]: I1209 17:26:40.657086 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="960e53e8-5055-4d17-a8d0-acecc475511e" path="/var/lib/kubelet/pods/960e53e8-5055-4d17-a8d0-acecc475511e/volumes"
Dec 09 17:26:40 crc kubenswrapper[4840]: I1209 17:26:40.657997 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a767ff72-af0e-4bb1-b30d-7b760595f234" path="/var/lib/kubelet/pods/a767ff72-af0e-4bb1-b30d-7b760595f234/volumes"
Dec 09 17:26:42 crc kubenswrapper[4840]: I1209 17:26:42.608472 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:26:42 crc kubenswrapper[4840]: E1209 17:26:42.608862 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:26:44 crc kubenswrapper[4840]: E1209 17:26:44.611953 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:26:51 crc kubenswrapper[4840]: E1209
17:26:51.612739 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:26:57 crc kubenswrapper[4840]: I1209 17:26:57.609852 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:26:57 crc kubenswrapper[4840]: E1209 17:26:57.611222 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:26:59 crc kubenswrapper[4840]: E1209 17:26:59.611029 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:27:03 crc kubenswrapper[4840]: I1209 17:27:03.057043 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-db-create-g4td9"] Dec 09 17:27:03 crc kubenswrapper[4840]: I1209 17:27:03.077676 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-db-create-g4td9"] Dec 09 17:27:04 crc kubenswrapper[4840]: I1209 17:27:04.027528 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-2tqf7"] Dec 09 17:27:04 crc kubenswrapper[4840]: I1209 17:27:04.039911 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-2tqf7"] Dec 09 17:27:04 crc kubenswrapper[4840]: I1209 17:27:04.625154 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69a097ba-a134-4d5f-906b-0cdb275ff034" path="/var/lib/kubelet/pods/69a097ba-a134-4d5f-906b-0cdb275ff034/volumes" Dec 09 17:27:04 crc kubenswrapper[4840]: I1209 17:27:04.625844 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce" path="/var/lib/kubelet/pods/eed8dbd1-0d39-4a2d-aab1-f30ad04d98ce/volumes" Dec 09 17:27:04 crc kubenswrapper[4840]: E1209 17:27:04.627477 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.028649 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-905b-account-create-update-xmh8g"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.038014 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5ce1-account-create-update-n5v6g"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.050513 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-rwhks"] Dec 09 17:27:07 crc kubenswrapper[4840]: 
I1209 17:27:07.059184 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-24c4-account-create-update-htjw8"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.082051 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-905b-account-create-update-xmh8g"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.094007 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6lkf2"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.104361 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5ce1-account-create-update-n5v6g"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.115161 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-rwhks"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.124367 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6lkf2"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.134430 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-24c4-account-create-update-htjw8"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.143594 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-1723-account-create-update-hmglv"] Dec 09 17:27:07 crc kubenswrapper[4840]: I1209 17:27:07.152878 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-1723-account-create-update-hmglv"] Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.621651 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="183c8e61-03c5-46d5-a906-7943bf183913" path="/var/lib/kubelet/pods/183c8e61-03c5-46d5-a906-7943bf183913/volumes" Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.622581 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b3dfd2c-a156-4f5f-b950-94623d183859" path="/var/lib/kubelet/pods/4b3dfd2c-a156-4f5f-b950-94623d183859/volumes" Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.623159 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63846955-b953-4eeb-9c6b-72a87b9740e8" path="/var/lib/kubelet/pods/63846955-b953-4eeb-9c6b-72a87b9740e8/volumes" Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.623745 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c269e4e2-0a9c-45ae-8a09-066ef4203036" path="/var/lib/kubelet/pods/c269e4e2-0a9c-45ae-8a09-066ef4203036/volumes" Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.624819 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1914945-6f71-4d08-8c84-d02706ed7b17" path="/var/lib/kubelet/pods/e1914945-6f71-4d08-8c84-d02706ed7b17/volumes" Dec 09 17:27:08 crc kubenswrapper[4840]: I1209 17:27:08.625667 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="feb054c3-cc6f-4af4-9dce-0683f20ec01a" path="/var/lib/kubelet/pods/feb054c3-cc6f-4af4-9dce-0683f20ec01a/volumes" Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.490552 4840 scope.go:117] "RemoveContainer" containerID="d4c71d14dc4efa3565f3b288922fa68090597e8ae09cc54bd61b42cc95aa7ac1" Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.510947 4840 scope.go:117] "RemoveContainer" containerID="d26be9469d2567bd01c76582b8578a055de89e2a3d65843684e3814bb57485d5" Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.563920 4840 scope.go:117] "RemoveContainer" containerID="dc78e48897ed6a351f5b61bebcd6eb792801f6cb3a9544d86ee7cfb5a36cecb1" Dec 09 17:27:09 crc 
kubenswrapper[4840]: I1209 17:27:09.609121 4840 scope.go:117] "RemoveContainer" containerID="4e696a39b1d8fa2d4a0db7b9d6f77f08361f4a70cc0f994c22f0909d3144485a"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.670469 4840 scope.go:117] "RemoveContainer" containerID="3c26da42463373081077bad1a87737ef2711a65e44049d16660238c9eec290a9"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.701017 4840 scope.go:117] "RemoveContainer" containerID="2ffc8cfdcd5e20f89923871bf804e4cd8179dc6d6963337a4300d9ba7c5fdc4d"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.753085 4840 scope.go:117] "RemoveContainer" containerID="554027641c60fbce63b1e7d768e9a313aa394ab2908707d448d943b46076d871"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.776162 4840 scope.go:117] "RemoveContainer" containerID="54e8239b5ec2ff95ceb85b476086e090ff7de7c80dbe5bcc1278a5d07674b89b"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.799183 4840 scope.go:117] "RemoveContainer" containerID="80bcb9b2d4f4380e1d19072232e3cfab681bc6a7ebc98fa91e0e7162ba1cd1e5"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.823476 4840 scope.go:117] "RemoveContainer" containerID="24f1dc5067538d010ada6bbf0fae7e0d639aa607d3e774a15a26246aea8b32e9"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.851546 4840 scope.go:117] "RemoveContainer" containerID="0dfe49a3f033bbd56658f0536a17ef9d93e2171beee7610921148bac2ea487bf"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.871444 4840 scope.go:117] "RemoveContainer" containerID="baec74f700582d39ae9a8906958e9199a691c9d779d4250aed41a4b79ebefb7e"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.898291 4840 scope.go:117] "RemoveContainer" containerID="fc75f4e17eed2b3d22906f8eaec1684e6a9eb84bbee122247d05128482621aca"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.918671 4840 scope.go:117] "RemoveContainer" containerID="1beaf98a12ca68dd684dfb02ea642b47cb6d12878189ed764ce203ecc03d1030"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.937637 4840 scope.go:117] "RemoveContainer" containerID="05255f1811dfe227a56f4deeae2b47df22d0a542c1f8c7eb67bb1a60a6e3188a"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.962937 4840 scope.go:117] "RemoveContainer" containerID="00f37ec0ca374e4e2afcaeac7f96f559b90706467a82277ff3b5c54d318595c2"
Dec 09 17:27:09 crc kubenswrapper[4840]: I1209 17:27:09.983950 4840 scope.go:117] "RemoveContainer" containerID="6b0b4f3f887409d71bbeda9338169a446b1714a1645ef9f94bcd497a11633aa0"
Dec 09 17:27:10 crc kubenswrapper[4840]: I1209 17:27:10.006366 4840 scope.go:117] "RemoveContainer" containerID="76161ed2ac33ef80b5875f7301526de119c4ae1a8188eb01e4a1b05d12bf7ad5"
Dec 09 17:27:10 crc kubenswrapper[4840]: I1209 17:27:10.609036 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3"
Dec 09 17:27:10 crc kubenswrapper[4840]: E1209 17:27:10.609618 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
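The "RemoveContainer" burst above is housekeeping that follows the SyncLoop DELETE/REMOVE records: once the finished db-create, account-create and db-sync Job pods are deleted from the API, their exited containers become garbage-collection candidates. A sketch of the eviction rule under the assumed kubelet defaults (keep at most one dead container per container, no minimum age; the container IDs are shortened from the records above and their grouping is hypothetical):

package main

import (
	"fmt"
	"sort"
)

// Dead-container GC sketch: keep the newest `keep` exited instances per
// container, remove the rest. Assumed defaults: MaxPerPodContainer=1, MinAge=0s.
type deadContainer struct {
	id       string
	finished int64 // exit time, unix seconds
}

func toEvict(byContainer map[string][]deadContainer, keep int) []string {
	var victims []string
	for _, list := range byContainer {
		// Newest first, so list[keep:] holds the older, evictable instances.
		sort.Slice(list, func(i, j int) bool { return list[i].finished > list[j].finished })
		if len(list) > keep {
			for _, d := range list[keep:] {
				victims = append(victims, d.id)
			}
		}
	}
	return victims
}

func main() {
	dead := map[string][]deadContainer{
		"placement-db-sync/placement-db-sync": { // hypothetical grouping
			{id: "4e696a39b1d8", finished: 100},
			{id: "3c26da424633", finished: 90},
		},
	}
	fmt.Println("would remove:", toEvict(dead, 1)) // [3c26da424633]
}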
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:27:12 crc kubenswrapper[4840]: I1209 17:27:12.027571 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-sjx4x"] Dec 09 17:27:12 crc kubenswrapper[4840]: I1209 17:27:12.036140 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-sjx4x"] Dec 09 17:27:12 crc kubenswrapper[4840]: I1209 17:27:12.623888 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cc949a4-49ca-42c2-b427-e8586dad8ebc" path="/var/lib/kubelet/pods/9cc949a4-49ca-42c2-b427-e8586dad8ebc/volumes" Dec 09 17:27:15 crc kubenswrapper[4840]: E1209 17:27:15.610953 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:27:23 crc kubenswrapper[4840]: E1209 17:27:23.611884 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:27:24 crc kubenswrapper[4840]: I1209 17:27:24.615325 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:27:24 crc kubenswrapper[4840]: E1209 17:27:24.615937 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:27:29 crc kubenswrapper[4840]: E1209 17:27:29.737734 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:27:29 crc kubenswrapper[4840]: E1209 17:27:29.738473 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
Dec 09 17:27:29 crc kubenswrapper[4840]: E1209 17:27:29.738473 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:27:29 crc kubenswrapper[4840]: E1209 17:27:29.738660 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:27:29 crc kubenswrapper[4840]: E1209 17:27:29.740187 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired.
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:27:35 crc kubenswrapper[4840]: E1209 17:27:35.738872 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:27:35 crc kubenswrapper[4840]: E1209 17:27:35.739420 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:27:35 crc kubenswrapper[4840]: E1209 17:27:35.739581 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:27:35 crc kubenswrapper[4840]: E1209 17:27:35.740883 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:27:39 crc kubenswrapper[4840]: I1209 17:27:39.607952 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:27:40 crc kubenswrapper[4840]: I1209 17:27:40.059579 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-jnkjx"] Dec 09 17:27:40 crc kubenswrapper[4840]: I1209 17:27:40.073643 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-jnkjx"] Dec 09 17:27:40 crc kubenswrapper[4840]: I1209 17:27:40.630265 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87c31a2f-f8da-4391-91b7-16544aceaf18" path="/var/lib/kubelet/pods/87c31a2f-f8da-4391-91b7-16544aceaf18/volumes" Dec 09 17:27:40 crc kubenswrapper[4840]: I1209 17:27:40.820431 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730"} Dec 09 17:27:41 crc kubenswrapper[4840]: I1209 17:27:41.045400 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-zthv5"] Dec 09 17:27:41 crc kubenswrapper[4840]: I1209 17:27:41.055715 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-zthv5"] Dec 09 17:27:42 crc kubenswrapper[4840]: E1209 17:27:42.612390 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:27:42 crc kubenswrapper[4840]: I1209 17:27:42.628029 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9144e8b-9235-4e97-83a5-7525f0986083" path="/var/lib/kubelet/pods/e9144e8b-9235-4e97-83a5-7525f0986083/volumes" Dec 09 17:27:49 crc kubenswrapper[4840]: E1209 17:27:49.612192 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:27:54 crc kubenswrapper[4840]: I1209 17:27:54.034337 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-fptt8"] Dec 09 17:27:54 crc kubenswrapper[4840]: I1209 17:27:54.045192 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-fptt8"] Dec 09 17:27:54 crc kubenswrapper[4840]: E1209 17:27:54.610378 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:27:54 crc kubenswrapper[4840]: I1209 17:27:54.622890 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18f6c52e-2e47-442e-80fe-a03f7b9582fe" path="/var/lib/kubelet/pods/18f6c52e-2e47-442e-80fe-a03f7b9582fe/volumes" Dec 09 17:27:58 crc kubenswrapper[4840]: I1209 17:27:58.085198 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-frlvd"] Dec 09 17:27:58 crc kubenswrapper[4840]: I1209 17:27:58.107638 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-frlvd"] Dec 09 17:27:58 crc kubenswrapper[4840]: I1209 17:27:58.621637 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97895c55-e758-4bd3-981c-2c9bd5eeabcb" path="/var/lib/kubelet/pods/97895c55-e758-4bd3-981c-2c9bd5eeabcb/volumes" Dec 09 17:28:04 crc kubenswrapper[4840]: E1209 17:28:04.618808 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:28:07 crc kubenswrapper[4840]: E1209 17:28:07.610182 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:28:08 crc kubenswrapper[4840]: I1209 17:28:08.029712 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-4ts68"] Dec 09 17:28:08 crc kubenswrapper[4840]: I1209 17:28:08.040860 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-4ts68"] Dec 09 17:28:08 crc 
kubenswrapper[4840]: I1209 17:28:08.620315 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64c96895-a046-41da-83d1-5cb61d38de00" path="/var/lib/kubelet/pods/64c96895-a046-41da-83d1-5cb61d38de00/volumes" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.293534 4840 scope.go:117] "RemoveContainer" containerID="f55e4bae8f11978444d2b44fef67fc83574388fbb8ae5e524673f04682ef2bff" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.351530 4840 scope.go:117] "RemoveContainer" containerID="b1b0924cd3493f8478df6e91773d50a15973bd7df22a8c7365b6eae2e708f66b" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.398734 4840 scope.go:117] "RemoveContainer" containerID="2d3fd5a814ca00b805df2bf63f79a515f74ad4f54b17f51a56c606d04ca23ac0" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.452908 4840 scope.go:117] "RemoveContainer" containerID="fde18d4fe6a6418a8f526ffb3379ab1015673866948bd451ae9d5b5f640f2464" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.516442 4840 scope.go:117] "RemoveContainer" containerID="8156bb35fc59e93457f66565c135b52bec2b18d319f573bf1857ced938768472" Dec 09 17:28:10 crc kubenswrapper[4840]: I1209 17:28:10.588372 4840 scope.go:117] "RemoveContainer" containerID="badd043f150336fe753cf4703c39aff99f4b65cb758bead86f410a1754883bff" Dec 09 17:28:15 crc kubenswrapper[4840]: E1209 17:28:15.612952 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:28:17 crc kubenswrapper[4840]: I1209 17:28:17.024633 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-qv6wf"] Dec 09 17:28:17 crc kubenswrapper[4840]: I1209 17:28:17.032955 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-qv6wf"] Dec 09 17:28:18 crc kubenswrapper[4840]: I1209 17:28:18.633863 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ff5b771-f400-4f66-9d95-9f66fff18a82" path="/var/lib/kubelet/pods/7ff5b771-f400-4f66-9d95-9f66fff18a82/volumes" Dec 09 17:28:19 crc kubenswrapper[4840]: E1209 17:28:19.610426 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:28:26 crc kubenswrapper[4840]: I1209 17:28:26.058661 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-storageinit-ddrkz"] Dec 09 17:28:26 crc kubenswrapper[4840]: I1209 17:28:26.070857 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-storageinit-ddrkz"] Dec 09 17:28:26 crc kubenswrapper[4840]: E1209 17:28:26.611348 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:28:26 crc kubenswrapper[4840]: I1209 17:28:26.622189 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="42aee3b4-245f-4a6c-8765-ea5b407d0c2e" path="/var/lib/kubelet/pods/42aee3b4-245f-4a6c-8765-ea5b407d0c2e/volumes" Dec 09 17:28:32 crc kubenswrapper[4840]: E1209 17:28:32.610372 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:28:41 crc kubenswrapper[4840]: E1209 17:28:41.613756 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:28:43 crc kubenswrapper[4840]: E1209 17:28:43.611297 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:28:54 crc kubenswrapper[4840]: E1209 17:28:54.618670 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:28:55 crc kubenswrapper[4840]: E1209 17:28:55.610864 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:29:08 crc kubenswrapper[4840]: I1209 17:29:08.038294 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-l72ts"] Dec 09 17:29:08 crc kubenswrapper[4840]: I1209 17:29:08.048102 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-l72ts"] Dec 09 17:29:08 crc kubenswrapper[4840]: I1209 17:29:08.640945 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e6c81c8-f19e-4262-be49-f7b0f5dc707f" path="/var/lib/kubelet/pods/1e6c81c8-f19e-4262-be49-f7b0f5dc707f/volumes" Dec 09 17:29:09 crc kubenswrapper[4840]: I1209 17:29:09.026197 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-t7sm2"] Dec 09 17:29:09 crc kubenswrapper[4840]: I1209 17:29:09.037108 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-4brzw"] Dec 09 17:29:09 crc kubenswrapper[4840]: I1209 17:29:09.045052 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-t7sm2"] Dec 09 17:29:09 crc kubenswrapper[4840]: I1209 17:29:09.053698 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-4brzw"] Dec 09 17:29:09 crc kubenswrapper[4840]: E1209 17:29:09.623684 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:29:09 crc kubenswrapper[4840]: E1209 17:29:09.623728 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.044034 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-f458-account-create-update-mmdhp"] Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.056343 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-f458-account-create-update-mmdhp"] Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.621457 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b68c48b-8b21-4a23-ad36-d987eeae2757" path="/var/lib/kubelet/pods/0b68c48b-8b21-4a23-ad36-d987eeae2757/volumes" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.622134 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b92c382-2599-4692-9fc6-557ef858013d" path="/var/lib/kubelet/pods/8b92c382-2599-4692-9fc6-557ef858013d/volumes" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.622775 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4" path="/var/lib/kubelet/pods/f2e7689c-4750-4d5b-9cea-c0ec0fc8cda4/volumes" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.809700 4840 scope.go:117] "RemoveContainer" containerID="001569c1dde279ca7136d97884b55c5dd1883104d3d77cb098824b2940ac3d55" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.838724 4840 scope.go:117] "RemoveContainer" containerID="81fcdb981828bbad17175a0d13cb25175f65aad82fba157487ecc9166a4378a1" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.913713 4840 scope.go:117] "RemoveContainer" containerID="fbe04ed4cf5ed7a4021ff4fd97f03393e2f65a064b2a6d987117603817dbac03" Dec 09 17:29:10 crc kubenswrapper[4840]: I1209 17:29:10.965379 4840 scope.go:117] "RemoveContainer" containerID="f39c557d83b361b7cba8e942511dda27a1a7e5a9942ad897a51fadfcfab22f62" Dec 09 17:29:11 crc kubenswrapper[4840]: I1209 17:29:11.020518 4840 scope.go:117] "RemoveContainer" containerID="2f5a92a46f2284a8f6a5d84c987a7df97201c78acf4b4214641c42cc35bb5d59" Dec 09 17:29:11 crc kubenswrapper[4840]: I1209 17:29:11.064168 4840 scope.go:117] "RemoveContainer" containerID="dfc9acac5dd49d68d5b610e43cefd88edf85db17be73880a3e4512467e7849c4" Dec 09 17:29:12 crc kubenswrapper[4840]: I1209 17:29:12.035112 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-88e8-account-create-update-dqtnk"] Dec 09 17:29:12 crc kubenswrapper[4840]: I1209 17:29:12.046457 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-88e8-account-create-update-dqtnk"] Dec 09 17:29:12 crc kubenswrapper[4840]: I1209 17:29:12.621950 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1016da39-4885-4421-95bb-07c658b86dfd" path="/var/lib/kubelet/pods/1016da39-4885-4421-95bb-07c658b86dfd/volumes" Dec 09 17:29:14 crc kubenswrapper[4840]: I1209 17:29:14.026532 4840 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-cell0-e8ef-account-create-update-g9drg"] Dec 09 17:29:14 crc kubenswrapper[4840]: I1209 17:29:14.035904 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e8ef-account-create-update-g9drg"] Dec 09 17:29:14 crc kubenswrapper[4840]: I1209 17:29:14.637832 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4691d812-4f31-4379-9b25-e1fcafa891ff" path="/var/lib/kubelet/pods/4691d812-4f31-4379-9b25-e1fcafa891ff/volumes" Dec 09 17:29:21 crc kubenswrapper[4840]: E1209 17:29:21.611473 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:29:22 crc kubenswrapper[4840]: E1209 17:29:22.612874 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:29:33 crc kubenswrapper[4840]: E1209 17:29:33.611879 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:29:34 crc kubenswrapper[4840]: E1209 17:29:34.617202 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:29:48 crc kubenswrapper[4840]: I1209 17:29:48.053424 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bwmsc"] Dec 09 17:29:48 crc kubenswrapper[4840]: I1209 17:29:48.067134 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bwmsc"] Dec 09 17:29:48 crc kubenswrapper[4840]: E1209 17:29:48.610548 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:29:48 crc kubenswrapper[4840]: I1209 17:29:48.621898 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9eaf9500-68c1-4f32-a00e-693c221c4cb0" path="/var/lib/kubelet/pods/9eaf9500-68c1-4f32-a00e-693c221c4cb0/volumes" Dec 09 17:29:49 crc kubenswrapper[4840]: E1209 17:29:49.610101 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" 
Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.155562 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b"] Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.157782 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.159687 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.159826 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.176145 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b"] Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.205956 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdhlm\" (UniqueName: \"kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.206141 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.206202 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.307716 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.307861 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.308065 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdhlm\" (UniqueName: \"kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.308715 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.313884 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.330336 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdhlm\" (UniqueName: \"kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm\") pod \"collect-profiles-29421690-cq86b\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.483849 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:00 crc kubenswrapper[4840]: I1209 17:30:00.966061 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b"] Dec 09 17:30:01 crc kubenswrapper[4840]: I1209 17:30:01.238206 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" event={"ID":"e8c1ca2b-b533-41a6-a909-c6132352e702","Type":"ContainerStarted","Data":"a951986796ad1f061b96696236ab0e8d0154898a786c55470f7cbe6b363bf1ca"} Dec 09 17:30:01 crc kubenswrapper[4840]: I1209 17:30:01.238592 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" event={"ID":"e8c1ca2b-b533-41a6-a909-c6132352e702","Type":"ContainerStarted","Data":"e03fa9cbb0d2ae01d31c9b14ffdebfefd51c0239ac3c9d544edac2a09d32e7d4"} Dec 09 17:30:01 crc kubenswrapper[4840]: I1209 17:30:01.256087 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" podStartSLOduration=1.256069209 podStartE2EDuration="1.256069209s" podCreationTimestamp="2025-12-09 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 17:30:01.252143947 +0000 UTC m=+1987.243254580" watchObservedRunningTime="2025-12-09 17:30:01.256069209 +0000 UTC m=+1987.247179842" Dec 09 17:30:02 crc kubenswrapper[4840]: I1209 17:30:02.250132 4840 generic.go:334] "Generic (PLEG): container finished" podID="e8c1ca2b-b533-41a6-a909-c6132352e702" containerID="a951986796ad1f061b96696236ab0e8d0154898a786c55470f7cbe6b363bf1ca" exitCode=0 Dec 09 17:30:02 crc kubenswrapper[4840]: I1209 17:30:02.250239 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" 
event={"ID":"e8c1ca2b-b533-41a6-a909-c6132352e702","Type":"ContainerDied","Data":"a951986796ad1f061b96696236ab0e8d0154898a786c55470f7cbe6b363bf1ca"} Dec 09 17:30:02 crc kubenswrapper[4840]: E1209 17:30:02.614087 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:30:03 crc kubenswrapper[4840]: E1209 17:30:03.610332 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.678563 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.780741 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdhlm\" (UniqueName: \"kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm\") pod \"e8c1ca2b-b533-41a6-a909-c6132352e702\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.781025 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume\") pod \"e8c1ca2b-b533-41a6-a909-c6132352e702\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.781094 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume\") pod \"e8c1ca2b-b533-41a6-a909-c6132352e702\" (UID: \"e8c1ca2b-b533-41a6-a909-c6132352e702\") " Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.782098 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume" (OuterVolumeSpecName: "config-volume") pod "e8c1ca2b-b533-41a6-a909-c6132352e702" (UID: "e8c1ca2b-b533-41a6-a909-c6132352e702"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.798699 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm" (OuterVolumeSpecName: "kube-api-access-qdhlm") pod "e8c1ca2b-b533-41a6-a909-c6132352e702" (UID: "e8c1ca2b-b533-41a6-a909-c6132352e702"). InnerVolumeSpecName "kube-api-access-qdhlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.798808 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e8c1ca2b-b533-41a6-a909-c6132352e702" (UID: "e8c1ca2b-b533-41a6-a909-c6132352e702"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.883372 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8c1ca2b-b533-41a6-a909-c6132352e702-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.883400 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8c1ca2b-b533-41a6-a909-c6132352e702-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 17:30:03 crc kubenswrapper[4840]: I1209 17:30:03.883410 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdhlm\" (UniqueName: \"kubernetes.io/projected/e8c1ca2b-b533-41a6-a909-c6132352e702-kube-api-access-qdhlm\") on node \"crc\" DevicePath \"\"" Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.036016 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.036116 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.271247 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" event={"ID":"e8c1ca2b-b533-41a6-a909-c6132352e702","Type":"ContainerDied","Data":"e03fa9cbb0d2ae01d31c9b14ffdebfefd51c0239ac3c9d544edac2a09d32e7d4"} Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.271286 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e03fa9cbb0d2ae01d31c9b14ffdebfefd51c0239ac3c9d544edac2a09d32e7d4" Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.271339 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b" Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.344602 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc"] Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.352501 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-m9lrc"] Dec 09 17:30:04 crc kubenswrapper[4840]: I1209 17:30:04.624203 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eb66ea0-ca48-4f56-8911-0a048eb73a04" path="/var/lib/kubelet/pods/0eb66ea0-ca48-4f56-8911-0a048eb73a04/volumes" Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.062942 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-f4pbw"] Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.074867 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-f4pbw"] Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.254187 4840 scope.go:117] "RemoveContainer" containerID="f856c0fb1f20af820ef9dfd98d73ce354e134758809cda0c796cd54cdd1f7705" Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.296714 4840 scope.go:117] "RemoveContainer" containerID="eee8f899d6b6f495124f9d26248ee383f1f3791ad85f498f937e46e9d63a2460" Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.335647 4840 scope.go:117] "RemoveContainer" containerID="96ed9e8bfa2fc1a1335d042506e471b3ce52f55ec0a4173d9a7bc047ad58451d" Dec 09 17:30:11 crc kubenswrapper[4840]: I1209 17:30:11.387699 4840 scope.go:117] "RemoveContainer" containerID="5a7c908211ae13f1ea15dda93641fd37f1e30ed5ab6bf1efa0260a58d67dc107" Dec 09 17:30:12 crc kubenswrapper[4840]: I1209 17:30:12.067285 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-64jbl"] Dec 09 17:30:12 crc kubenswrapper[4840]: I1209 17:30:12.082395 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-64jbl"] Dec 09 17:30:12 crc kubenswrapper[4840]: I1209 17:30:12.624380 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c371bcf-af55-4ef1-a3ad-172b2db7bfbb" path="/var/lib/kubelet/pods/4c371bcf-af55-4ef1-a3ad-172b2db7bfbb/volumes" Dec 09 17:30:12 crc kubenswrapper[4840]: I1209 17:30:12.625397 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce552d37-bf06-4a84-a9a0-111ad1b9698b" path="/var/lib/kubelet/pods/ce552d37-bf06-4a84-a9a0-111ad1b9698b/volumes" Dec 09 17:30:17 crc kubenswrapper[4840]: E1209 17:30:17.611548 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:30:18 crc kubenswrapper[4840]: E1209 17:30:18.611762 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:30:28 crc kubenswrapper[4840]: E1209 17:30:28.610327 4840 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:30:33 crc kubenswrapper[4840]: E1209 17:30:33.612824 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:30:34 crc kubenswrapper[4840]: I1209 17:30:34.036307 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:30:34 crc kubenswrapper[4840]: I1209 17:30:34.036380 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.856437 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:30:41 crc kubenswrapper[4840]: E1209 17:30:41.857571 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8c1ca2b-b533-41a6-a909-c6132352e702" containerName="collect-profiles" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.857590 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8c1ca2b-b533-41a6-a909-c6132352e702" containerName="collect-profiles" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.857865 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8c1ca2b-b533-41a6-a909-c6132352e702" containerName="collect-profiles" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.859661 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.870728 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.952539 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.953123 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:41 crc kubenswrapper[4840]: I1209 17:30:41.953274 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skds7\" (UniqueName: \"kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.056170 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.056274 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skds7\" (UniqueName: \"kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.056354 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.057310 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.057865 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.087165 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-skds7\" (UniqueName: \"kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7\") pod \"certified-operators-75p42\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.221590 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:42 crc kubenswrapper[4840]: E1209 17:30:42.616719 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:30:42 crc kubenswrapper[4840]: W1209 17:30:42.757253 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64e0a887_70fa_4479_a9cc_464fe41291ab.slice/crio-a8e185e876619ea9b7e1eccbf1526ac8041198597a3cb8087839c1ff2d2efe7d WatchSource:0}: Error finding container a8e185e876619ea9b7e1eccbf1526ac8041198597a3cb8087839c1ff2d2efe7d: Status 404 returned error can't find the container with id a8e185e876619ea9b7e1eccbf1526ac8041198597a3cb8087839c1ff2d2efe7d Dec 09 17:30:42 crc kubenswrapper[4840]: I1209 17:30:42.757932 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:30:43 crc kubenswrapper[4840]: I1209 17:30:43.691858 4840 generic.go:334] "Generic (PLEG): container finished" podID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerID="6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178" exitCode=0 Dec 09 17:30:43 crc kubenswrapper[4840]: I1209 17:30:43.692016 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerDied","Data":"6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178"} Dec 09 17:30:43 crc kubenswrapper[4840]: I1209 17:30:43.692205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerStarted","Data":"a8e185e876619ea9b7e1eccbf1526ac8041198597a3cb8087839c1ff2d2efe7d"} Dec 09 17:30:43 crc kubenswrapper[4840]: I1209 17:30:43.694632 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:30:44 crc kubenswrapper[4840]: I1209 17:30:44.707132 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerStarted","Data":"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2"} Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.048411 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.050507 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.082444 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.146525 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9489s\" (UniqueName: \"kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.146608 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.146716 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.248523 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.248932 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.249111 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9489s\" (UniqueName: \"kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.249431 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.249454 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.280711 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9489s\" (UniqueName: \"kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s\") pod \"redhat-operators-nzv64\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.390105 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:45 crc kubenswrapper[4840]: I1209 17:30:45.922353 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:30:46 crc kubenswrapper[4840]: E1209 17:30:46.610186 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:30:46 crc kubenswrapper[4840]: I1209 17:30:46.725579 4840 generic.go:334] "Generic (PLEG): container finished" podID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerID="c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2" exitCode=0 Dec 09 17:30:46 crc kubenswrapper[4840]: I1209 17:30:46.725643 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerDied","Data":"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2"} Dec 09 17:30:46 crc kubenswrapper[4840]: I1209 17:30:46.728839 4840 generic.go:334] "Generic (PLEG): container finished" podID="83b65de8-a718-449a-8416-2e5abd7083d9" containerID="1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138" exitCode=0 Dec 09 17:30:46 crc kubenswrapper[4840]: I1209 17:30:46.728877 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerDied","Data":"1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138"} Dec 09 17:30:46 crc kubenswrapper[4840]: I1209 17:30:46.728900 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerStarted","Data":"d2682796ddc9836b8345bb4b454633e73b185d45411c6d48a97308c7dce1cbb5"} Dec 09 17:30:47 crc kubenswrapper[4840]: I1209 17:30:47.739257 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerStarted","Data":"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669"} Dec 09 17:30:48 crc kubenswrapper[4840]: I1209 17:30:48.753287 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerStarted","Data":"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b"} Dec 09 17:30:48 crc kubenswrapper[4840]: I1209 17:30:48.792005 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-75p42" podStartSLOduration=4.310236765 podStartE2EDuration="7.791982952s" podCreationTimestamp="2025-12-09 17:30:41 +0000 UTC" firstStartedPulling="2025-12-09 17:30:43.694323425 +0000 UTC m=+2029.685434058" 
lastFinishedPulling="2025-12-09 17:30:47.176069622 +0000 UTC m=+2033.167180245" observedRunningTime="2025-12-09 17:30:47.755478977 +0000 UTC m=+2033.746589620" watchObservedRunningTime="2025-12-09 17:30:48.791982952 +0000 UTC m=+2034.783093595" Dec 09 17:30:51 crc kubenswrapper[4840]: I1209 17:30:51.786895 4840 generic.go:334] "Generic (PLEG): container finished" podID="83b65de8-a718-449a-8416-2e5abd7083d9" containerID="435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b" exitCode=0 Dec 09 17:30:51 crc kubenswrapper[4840]: I1209 17:30:51.787001 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerDied","Data":"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b"} Dec 09 17:30:52 crc kubenswrapper[4840]: I1209 17:30:52.222597 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:52 crc kubenswrapper[4840]: I1209 17:30:52.222648 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:30:53 crc kubenswrapper[4840]: I1209 17:30:53.279021 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-75p42" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="registry-server" probeResult="failure" output=< Dec 09 17:30:53 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:30:53 crc kubenswrapper[4840]: > Dec 09 17:30:54 crc kubenswrapper[4840]: I1209 17:30:54.848746 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerStarted","Data":"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2"} Dec 09 17:30:54 crc kubenswrapper[4840]: I1209 17:30:54.875352 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nzv64" podStartSLOduration=2.953004183 podStartE2EDuration="9.875333793s" podCreationTimestamp="2025-12-09 17:30:45 +0000 UTC" firstStartedPulling="2025-12-09 17:30:46.7303185 +0000 UTC m=+2032.721429133" lastFinishedPulling="2025-12-09 17:30:53.65264811 +0000 UTC m=+2039.643758743" observedRunningTime="2025-12-09 17:30:54.873658605 +0000 UTC m=+2040.864769238" watchObservedRunningTime="2025-12-09 17:30:54.875333793 +0000 UTC m=+2040.866444426" Dec 09 17:30:55 crc kubenswrapper[4840]: I1209 17:30:55.390243 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:55 crc kubenswrapper[4840]: I1209 17:30:55.390565 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:30:56 crc kubenswrapper[4840]: I1209 17:30:56.444111 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nzv64" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="registry-server" probeResult="failure" output=< Dec 09 17:30:56 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:30:56 crc kubenswrapper[4840]: > Dec 09 17:30:56 crc kubenswrapper[4840]: E1209 17:30:56.610591 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:30:58 crc kubenswrapper[4840]: I1209 17:30:58.102808 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-9mn25"] Dec 09 17:30:58 crc kubenswrapper[4840]: I1209 17:30:58.114627 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-9mn25"] Dec 09 17:30:58 crc kubenswrapper[4840]: E1209 17:30:58.614607 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:30:58 crc kubenswrapper[4840]: I1209 17:30:58.622151 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a569cc0-6ebe-431c-af1b-3c7560dc2954" path="/var/lib/kubelet/pods/0a569cc0-6ebe-431c-af1b-3c7560dc2954/volumes" Dec 09 17:31:02 crc kubenswrapper[4840]: I1209 17:31:02.275645 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:31:02 crc kubenswrapper[4840]: I1209 17:31:02.342656 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:31:02 crc kubenswrapper[4840]: I1209 17:31:02.533863 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:31:03 crc kubenswrapper[4840]: I1209 17:31:03.940866 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-75p42" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="registry-server" containerID="cri-o://0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669" gracePeriod=2 Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.036179 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.036232 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.036273 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.037014 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:31:04 crc 
kubenswrapper[4840]: I1209 17:31:04.037066 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730" gracePeriod=600 Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.562340 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.685494 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content\") pod \"64e0a887-70fa-4479-a9cc-464fe41291ab\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.685558 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities\") pod \"64e0a887-70fa-4479-a9cc-464fe41291ab\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.685660 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skds7\" (UniqueName: \"kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7\") pod \"64e0a887-70fa-4479-a9cc-464fe41291ab\" (UID: \"64e0a887-70fa-4479-a9cc-464fe41291ab\") " Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.686286 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities" (OuterVolumeSpecName: "utilities") pod "64e0a887-70fa-4479-a9cc-464fe41291ab" (UID: "64e0a887-70fa-4479-a9cc-464fe41291ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.690461 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7" (OuterVolumeSpecName: "kube-api-access-skds7") pod "64e0a887-70fa-4479-a9cc-464fe41291ab" (UID: "64e0a887-70fa-4479-a9cc-464fe41291ab"). InnerVolumeSpecName "kube-api-access-skds7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.738062 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64e0a887-70fa-4479-a9cc-464fe41291ab" (UID: "64e0a887-70fa-4479-a9cc-464fe41291ab"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.788753 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.789061 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skds7\" (UniqueName: \"kubernetes.io/projected/64e0a887-70fa-4479-a9cc-464fe41291ab-kube-api-access-skds7\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.789079 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64e0a887-70fa-4479-a9cc-464fe41291ab-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.954286 4840 generic.go:334] "Generic (PLEG): container finished" podID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerID="0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669" exitCode=0 Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.954387 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerDied","Data":"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669"} Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.954409 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75p42" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.954419 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75p42" event={"ID":"64e0a887-70fa-4479-a9cc-464fe41291ab","Type":"ContainerDied","Data":"a8e185e876619ea9b7e1eccbf1526ac8041198597a3cb8087839c1ff2d2efe7d"} Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.954460 4840 scope.go:117] "RemoveContainer" containerID="0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669" Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.960077 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730" exitCode=0 Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.960155 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730"} Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.960348 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"} Dec 09 17:31:04 crc kubenswrapper[4840]: I1209 17:31:04.982356 4840 scope.go:117] "RemoveContainer" containerID="c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:04.999997 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.007296 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/certified-operators-75p42"] Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.011951 4840 scope.go:117] "RemoveContainer" containerID="6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.036899 4840 scope.go:117] "RemoveContainer" containerID="0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669" Dec 09 17:31:05 crc kubenswrapper[4840]: E1209 17:31:05.037290 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669\": container with ID starting with 0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669 not found: ID does not exist" containerID="0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.037425 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669"} err="failed to get container status \"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669\": rpc error: code = NotFound desc = could not find container \"0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669\": container with ID starting with 0f54bfdf8b22dc19181b13420e463bd6b724339efbeb8b376895cdb95e01e669 not found: ID does not exist" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.037516 4840 scope.go:117] "RemoveContainer" containerID="c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2" Dec 09 17:31:05 crc kubenswrapper[4840]: E1209 17:31:05.037891 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2\": container with ID starting with c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2 not found: ID does not exist" containerID="c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.038009 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2"} err="failed to get container status \"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2\": rpc error: code = NotFound desc = could not find container \"c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2\": container with ID starting with c172345edf18b324ff509caef8b57331ccb16f924670360f878fd70ffd8e11d2 not found: ID does not exist" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.038053 4840 scope.go:117] "RemoveContainer" containerID="6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178" Dec 09 17:31:05 crc kubenswrapper[4840]: E1209 17:31:05.038399 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178\": container with ID starting with 6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178 not found: ID does not exist" containerID="6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.038437 4840 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178"} err="failed to get container status \"6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178\": rpc error: code = NotFound desc = could not find container \"6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178\": container with ID starting with 6cf78193f873bc188e425a473580cfc5db20f0c0691297eb7608274fe77f6178 not found: ID does not exist" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.038464 4840 scope.go:117] "RemoveContainer" containerID="f099ee68aa04c864ee7120c29505718a2d5a60de39e3e2b5c32b2c82041708f3" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.443713 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:31:05 crc kubenswrapper[4840]: I1209 17:31:05.498788 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:31:06 crc kubenswrapper[4840]: I1209 17:31:06.628389 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" path="/var/lib/kubelet/pods/64e0a887-70fa-4479-a9cc-464fe41291ab/volumes" Dec 09 17:31:06 crc kubenswrapper[4840]: I1209 17:31:06.916958 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.000338 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nzv64" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="registry-server" containerID="cri-o://2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2" gracePeriod=2 Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.522267 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.644850 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9489s\" (UniqueName: \"kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s\") pod \"83b65de8-a718-449a-8416-2e5abd7083d9\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.644976 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities\") pod \"83b65de8-a718-449a-8416-2e5abd7083d9\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.645226 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content\") pod \"83b65de8-a718-449a-8416-2e5abd7083d9\" (UID: \"83b65de8-a718-449a-8416-2e5abd7083d9\") " Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.645770 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities" (OuterVolumeSpecName: "utilities") pod "83b65de8-a718-449a-8416-2e5abd7083d9" (UID: "83b65de8-a718-449a-8416-2e5abd7083d9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.653606 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s" (OuterVolumeSpecName: "kube-api-access-9489s") pod "83b65de8-a718-449a-8416-2e5abd7083d9" (UID: "83b65de8-a718-449a-8416-2e5abd7083d9"). InnerVolumeSpecName "kube-api-access-9489s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.747635 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9489s\" (UniqueName: \"kubernetes.io/projected/83b65de8-a718-449a-8416-2e5abd7083d9-kube-api-access-9489s\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.747899 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.773892 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83b65de8-a718-449a-8416-2e5abd7083d9" (UID: "83b65de8-a718-449a-8416-2e5abd7083d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:31:07 crc kubenswrapper[4840]: I1209 17:31:07.849458 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b65de8-a718-449a-8416-2e5abd7083d9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.015311 4840 generic.go:334] "Generic (PLEG): container finished" podID="83b65de8-a718-449a-8416-2e5abd7083d9" containerID="2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2" exitCode=0 Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.015461 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nzv64" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.015465 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerDied","Data":"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2"} Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.015886 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzv64" event={"ID":"83b65de8-a718-449a-8416-2e5abd7083d9","Type":"ContainerDied","Data":"d2682796ddc9836b8345bb4b454633e73b185d45411c6d48a97308c7dce1cbb5"} Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.015936 4840 scope.go:117] "RemoveContainer" containerID="2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.062872 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.063528 4840 scope.go:117] "RemoveContainer" containerID="435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.083932 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nzv64"] Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.088123 4840 scope.go:117] "RemoveContainer" containerID="1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.141505 4840 scope.go:117] "RemoveContainer" containerID="2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2" Dec 09 17:31:08 crc kubenswrapper[4840]: E1209 17:31:08.141915 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2\": container with ID starting with 2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2 not found: ID does not exist" containerID="2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.141975 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2"} err="failed to get container status \"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2\": rpc error: code = NotFound desc = could not find container \"2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2\": container with ID starting with 2cbfd9f82878e93bd27755c937229a0c40bb82a74fd9a61cf1087604379617c2 not found: ID does not exist" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.142002 4840 scope.go:117] "RemoveContainer" containerID="435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b" Dec 09 17:31:08 crc kubenswrapper[4840]: E1209 17:31:08.142699 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b\": container with ID starting with 435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b not found: ID does not exist" containerID="435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.142723 4840 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b"} err="failed to get container status \"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b\": rpc error: code = NotFound desc = could not find container \"435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b\": container with ID starting with 435b14fa0803da586a44e2607ff27b3bd2e6923f1cc49352aa0bf7daa391740b not found: ID does not exist" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.142736 4840 scope.go:117] "RemoveContainer" containerID="1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138" Dec 09 17:31:08 crc kubenswrapper[4840]: E1209 17:31:08.143391 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138\": container with ID starting with 1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138 not found: ID does not exist" containerID="1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.143540 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138"} err="failed to get container status \"1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138\": rpc error: code = NotFound desc = could not find container \"1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138\": container with ID starting with 1e2a61432fcacbefdeccd44bbc840aefba55c0885a64c3f9518c890de1c18138 not found: ID does not exist" Dec 09 17:31:08 crc kubenswrapper[4840]: I1209 17:31:08.619999 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" path="/var/lib/kubelet/pods/83b65de8-a718-449a-8416-2e5abd7083d9/volumes" Dec 09 17:31:10 crc kubenswrapper[4840]: E1209 17:31:10.612132 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:31:11 crc kubenswrapper[4840]: I1209 17:31:11.515049 4840 scope.go:117] "RemoveContainer" containerID="7f7ccb7739c4654a9cb30f9feceba39ea3c5cdb5bb82448b91b77747d2d8188a" Dec 09 17:31:11 crc kubenswrapper[4840]: I1209 17:31:11.583254 4840 scope.go:117] "RemoveContainer" containerID="17f6ef87e93d07684ffe1c68df19721aa9f2b1aaa68647880ad37249a297e4b4" Dec 09 17:31:11 crc kubenswrapper[4840]: E1209 17:31:11.610778 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:31:11 crc kubenswrapper[4840]: I1209 17:31:11.644257 4840 scope.go:117] "RemoveContainer" containerID="be0aff0214ef7a52186961dc7d97cf3fd19600c913b98730b0dff858c95e82a0" Dec 09 17:31:21 crc kubenswrapper[4840]: E1209 17:31:21.612716 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with 
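The UnmountVolume started → TearDown succeeded → "Volume detached" progression above is the volume manager reconciling actual state against desired state after pod deletion: each mounted volume of the deleted pod is torn down and then recorded as detached. A toy reconciler showing the same ordering; the volume names mirror the log, but the data structures are invented for illustration.

    package main

    import "fmt"

    type volumeState struct {
        mounted bool
    }

    // reconcile drives actual state (mounted) toward desired state (the set of
    // volumes that should still be mounted) and records the detach afterwards.
    func reconcile(world map[string]*volumeState, desired map[string]bool) {
        for name, st := range world {
            if desired[name] || !st.mounted {
                continue
            }
            fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", name)
            st.mounted = false // UnmountVolume.TearDown
            fmt.Printf("Volume detached for volume %q DevicePath \"\"\n", name)
        }
    }

    func main() {
        world := map[string]*volumeState{
            "utilities":       {mounted: true},
            "catalog-content": {mounted: true},
        }
        reconcile(world, map[string]bool{}) // pod deleted: nothing is desired
    }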
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:31:26 crc kubenswrapper[4840]: E1209 17:31:26.611622 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:31:34 crc kubenswrapper[4840]: E1209 17:31:34.618271 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:31:38 crc kubenswrapper[4840]: E1209 17:31:38.611946 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:31:46 crc kubenswrapper[4840]: E1209 17:31:46.611774 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:31:49 crc kubenswrapper[4840]: E1209 17:31:49.611780 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:31:58 crc kubenswrapper[4840]: E1209 17:31:58.611024 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:32:03 crc kubenswrapper[4840]: E1209 17:32:03.610529 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:32:10 crc kubenswrapper[4840]: E1209 17:32:10.610725 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:32:14 crc kubenswrapper[4840]: E1209 
17:32:14.638292 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:32:22 crc kubenswrapper[4840]: E1209 17:32:22.611926 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:32:29 crc kubenswrapper[4840]: E1209 17:32:29.611160 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:32:35 crc kubenswrapper[4840]: E1209 17:32:35.694790 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:32:35 crc kubenswrapper[4840]: E1209 17:32:35.695415 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
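The repeating "Back-off pulling image" lines mean the pull itself is only retried on an exponential back-off schedule; every pod sync attempt in between is skipped and logged as ImagePullBackOff until the back-off window expires (hence the actual pull error only reappearing at 17:32:35). A sketch of such a schedule, assuming the commonly cited kubelet defaults of a 10s initial delay doubling up to a 5m cap; these values are an assumption, not read from this cluster's configuration.

    package main

    import (
        "fmt"
        "time"
    )

    // nextBackoff doubles the previous delay, starting at 10s and capping at 5m.
    func nextBackoff(prev time.Duration) time.Duration {
        const (
            initial = 10 * time.Second
            max     = 5 * time.Minute
        )
        if prev == 0 {
            return initial
        }
        next := prev * 2
        if next > max {
            next = max
        }
        return next
    }

    func main() {
        d := time.Duration(0)
        for i := 0; i < 7; i++ {
            d = nextBackoff(d)
            fmt.Printf("attempt %d: wait %v before retrying the pull\n", i+1, d)
        }
    }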
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:32:35 crc kubenswrapper[4840]: E1209 17:32:35.695565 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:32:35 crc kubenswrapper[4840]: E1209 17:32:35.696756 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:32:42 crc kubenswrapper[4840]: E1209 17:32:42.735519 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:32:42 crc kubenswrapper[4840]: E1209 17:32:42.736108 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:32:42 crc kubenswrapper[4840]: E1209 17:32:42.736240 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
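The &Container{...} dump above is the kubelet printing the full container spec it failed to start. As a reading aid, here is the same cloudkitty-db-sync container reconstructed as a corev1.Container in Go, keeping only the fields visible in the log and leaving everything else at zero values; this is an illustration, not the operator's source.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func ptr[T any](v T) *T { return &v }

    func main() {
        // Key fields from the logged spec: kolla entrypoint, env strategy,
        // non-root UID 42406, and IfNotPresent pull policy.
        c := corev1.Container{
            Name:    "cloudkitty-db-sync",
            Image:   "quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested",
            Command: []string{"/bin/bash"},
            Args:    []string{"-c", "/usr/local/bin/kolla_start"},
            Env: []corev1.EnvVar{
                {Name: "KOLLA_BOOTSTRAP", Value: "TRUE"},
                {Name: "KOLLA_CONFIG_STRATEGY", Value: "COPY_ALWAYS"},
            },
            ImagePullPolicy: corev1.PullIfNotPresent,
            SecurityContext: &corev1.SecurityContext{
                RunAsUser:    ptr(int64(42406)),
                RunAsNonRoot: ptr(true),
            },
        }
        fmt.Printf("%s pulls %s\n", c.Name, c.Image)
    }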
Dec 09 17:32:42 crc kubenswrapper[4840]: E1209 17:32:42.736240 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:32:42 crc kubenswrapper[4840]: E1209 17:32:42.737514 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:32:48 crc kubenswrapper[4840]: E1209 17:32:48.610897 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:32:54 crc kubenswrapper[4840]: E1209 17:32:54.620622 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:33:02 crc kubenswrapper[4840]: E1209 17:33:02.619413 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:33:04 crc kubenswrapper[4840]: I1209 17:33:04.035905 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:33:04 crc kubenswrapper[4840]: I1209 17:33:04.036668 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:33:05 crc kubenswrapper[4840]: E1209 17:33:05.611567 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:33:14 crc kubenswrapper[4840]: E1209 17:33:14.616446 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:33:18 crc kubenswrapper[4840]: E1209 17:33:18.618590 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:33:27 crc kubenswrapper[4840]: E1209 17:33:27.614287 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:33:31 crc kubenswrapper[4840]: E1209 17:33:31.611643 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:33:34 crc kubenswrapper[4840]: I1209 17:33:34.036650 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:33:34 crc kubenswrapper[4840]: I1209 17:33:34.037644 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:33:38 crc kubenswrapper[4840]: E1209 17:33:38.610258 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
generic.go:334] "Generic (PLEG): container finished" podID="b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" containerID="b21293e410947e7f6a5069aba5fc81c95fce88d2cbba708672b854b61bbc19ba" exitCode=2 Dec 09 17:33:44 crc kubenswrapper[4840]: I1209 17:33:44.673054 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" event={"ID":"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36","Type":"ContainerDied","Data":"b21293e410947e7f6a5069aba5fc81c95fce88d2cbba708672b854b61bbc19ba"} Dec 09 17:33:45 crc kubenswrapper[4840]: E1209 17:33:45.610940 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.311023 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.494880 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key\") pod \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.495090 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory\") pod \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.495561 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjdz9\" (UniqueName: \"kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9\") pod \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\" (UID: \"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36\") " Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.508493 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9" (OuterVolumeSpecName: "kube-api-access-zjdz9") pod "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" (UID: "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36"). InnerVolumeSpecName "kube-api-access-zjdz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.551698 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory" (OuterVolumeSpecName: "inventory") pod "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" (UID: "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.557630 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" (UID: "b6a0155a-f3d1-4a80-ad83-1fcce8d5de36"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.598898 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjdz9\" (UniqueName: \"kubernetes.io/projected/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-kube-api-access-zjdz9\") on node \"crc\" DevicePath \"\"" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.598990 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.599005 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6a0155a-f3d1-4a80-ad83-1fcce8d5de36-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.693556 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" event={"ID":"b6a0155a-f3d1-4a80-ad83-1fcce8d5de36","Type":"ContainerDied","Data":"69f41ff711634da7b3abadd4e912add0ceb73b65b69b8be32d97c5ebabe24e5d"} Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.693613 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69f41ff711634da7b3abadd4e912add0ceb73b65b69b8be32d97c5ebabe24e5d" Dec 09 17:33:46 crc kubenswrapper[4840]: I1209 17:33:46.693614 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7" Dec 09 17:33:51 crc kubenswrapper[4840]: E1209 17:33:51.612274 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.045364 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl"] Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.048108 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="extract-content" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.048305 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="extract-content" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.048449 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.048571 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.048734 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.048856 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.049034 4840 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="extract-utilities" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.049189 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="extract-utilities" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.049338 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="extract-content" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.049462 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="extract-content" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.049588 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.049716 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:33:54 crc kubenswrapper[4840]: E1209 17:33:54.049871 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="extract-utilities" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.050041 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="extract-utilities" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.050546 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b65de8-a718-449a-8416-2e5abd7083d9" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.050712 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="64e0a887-70fa-4479-a9cc-464fe41291ab" containerName="registry-server" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.050871 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a0155a-f3d1-4a80-ad83-1fcce8d5de36" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.052305 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.055248 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.056009 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.056265 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.057164 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.059245 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl"] Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.090068 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.090156 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4n8v\" (UniqueName: \"kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.090394 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.193222 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.193396 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.193510 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4n8v\" (UniqueName: \"kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.201844 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.202489 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.211075 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4n8v\" (UniqueName: \"kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.413944 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:33:54 crc kubenswrapper[4840]: I1209 17:33:54.997000 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl"] Dec 09 17:33:55 crc kubenswrapper[4840]: I1209 17:33:55.557200 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:33:55 crc kubenswrapper[4840]: I1209 17:33:55.786711 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" event={"ID":"7fb2365c-d487-44bb-8096-85400eb2f6ee","Type":"ContainerStarted","Data":"bcff211dd490f3817b9636ba07e0d4a96dedcf0707e15d2620306d7173dee732"} Dec 09 17:33:56 crc kubenswrapper[4840]: I1209 17:33:56.797374 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" event={"ID":"7fb2365c-d487-44bb-8096-85400eb2f6ee","Type":"ContainerStarted","Data":"637e2b3b820cf8dedb9e591afba8e30c74c2408bcfdb5aa1e392f5cc4ff98035"} Dec 09 17:33:56 crc kubenswrapper[4840]: I1209 17:33:56.827101 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" podStartSLOduration=2.2676080880000002 podStartE2EDuration="2.82707554s" podCreationTimestamp="2025-12-09 17:33:54 +0000 UTC" firstStartedPulling="2025-12-09 17:33:54.994124031 +0000 UTC m=+2220.985234704" lastFinishedPulling="2025-12-09 17:33:55.553591523 +0000 UTC m=+2221.544702156" observedRunningTime="2025-12-09 17:33:56.813234754 +0000 UTC m=+2222.804345417" watchObservedRunningTime="2025-12-09 17:33:56.82707554 +0000 UTC m=+2222.818186183" Dec 09 17:33:58 crc kubenswrapper[4840]: E1209 17:33:58.610571 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.036198 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.036765 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.036807 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.037602 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.037662 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" gracePeriod=600 Dec 09 17:34:04 crc kubenswrapper[4840]: E1209 17:34:04.173864 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.887973 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" exitCode=0 Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.887995 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"} Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.888365 4840 scope.go:117] "RemoveContainer" containerID="1b8939d779db3be6289d63ad3f90e0b23788c74d7a8574e72d6dce9560817730" Dec 09 17:34:04 crc kubenswrapper[4840]: I1209 17:34:04.889112 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:34:04 crc kubenswrapper[4840]: E1209 17:34:04.889421 4840 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:05 crc kubenswrapper[4840]: E1209 17:34:05.609743 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.829596 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.833922 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.844947 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.972900 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.973129 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvmpg\" (UniqueName: \"kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:06 crc kubenswrapper[4840]: I1209 17:34:06.973340 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.075158 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvmpg\" (UniqueName: \"kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.075333 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.075433 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.076087 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.076390 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.107801 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvmpg\" (UniqueName: \"kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg\") pod \"community-operators-pts7d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.153660 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.742610 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:07 crc kubenswrapper[4840]: I1209 17:34:07.936177 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerStarted","Data":"f70e6f82cedf2f5e09a908ab5ff88cef346cde61c0953262f1da70e91ef86803"} Dec 09 17:34:08 crc kubenswrapper[4840]: I1209 17:34:08.947457 4840 generic.go:334] "Generic (PLEG): container finished" podID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerID="66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0" exitCode=0 Dec 09 17:34:08 crc kubenswrapper[4840]: I1209 17:34:08.947510 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerDied","Data":"66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0"} Dec 09 17:34:11 crc kubenswrapper[4840]: E1209 17:34:11.610521 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:34:11 crc kubenswrapper[4840]: I1209 17:34:11.978249 4840 generic.go:334] "Generic (PLEG): container finished" podID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerID="ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a" exitCode=0 Dec 09 17:34:11 crc kubenswrapper[4840]: I1209 17:34:11.978308 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" 
event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerDied","Data":"ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a"} Dec 09 17:34:12 crc kubenswrapper[4840]: I1209 17:34:12.988416 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerStarted","Data":"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4"} Dec 09 17:34:13 crc kubenswrapper[4840]: I1209 17:34:13.019299 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pts7d" podStartSLOduration=3.499161469 podStartE2EDuration="7.019276875s" podCreationTimestamp="2025-12-09 17:34:06 +0000 UTC" firstStartedPulling="2025-12-09 17:34:08.949469841 +0000 UTC m=+2234.940580474" lastFinishedPulling="2025-12-09 17:34:12.469585237 +0000 UTC m=+2238.460695880" observedRunningTime="2025-12-09 17:34:13.01062012 +0000 UTC m=+2239.001730753" watchObservedRunningTime="2025-12-09 17:34:13.019276875 +0000 UTC m=+2239.010387508" Dec 09 17:34:15 crc kubenswrapper[4840]: I1209 17:34:15.609195 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:34:15 crc kubenswrapper[4840]: E1209 17:34:15.610169 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:17 crc kubenswrapper[4840]: I1209 17:34:17.154605 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:17 crc kubenswrapper[4840]: I1209 17:34:17.154952 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:17 crc kubenswrapper[4840]: I1209 17:34:17.223826 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:18 crc kubenswrapper[4840]: I1209 17:34:18.134696 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:18 crc kubenswrapper[4840]: I1209 17:34:18.181108 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.079339 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pts7d" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="registry-server" containerID="cri-o://4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4" gracePeriod=2 Dec 09 17:34:20 crc kubenswrapper[4840]: E1209 17:34:20.610062 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 
17:34:20.704462 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.775848 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content\") pod \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.776187 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvmpg\" (UniqueName: \"kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg\") pod \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.776318 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities\") pod \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\" (UID: \"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d\") " Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.777536 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities" (OuterVolumeSpecName: "utilities") pod "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" (UID: "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.781331 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg" (OuterVolumeSpecName: "kube-api-access-zvmpg") pod "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" (UID: "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d"). InnerVolumeSpecName "kube-api-access-zvmpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.834203 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" (UID: "064e7a2d-30f6-4a56-85e6-824c8b2b5f2d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.879004 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvmpg\" (UniqueName: \"kubernetes.io/projected/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-kube-api-access-zvmpg\") on node \"crc\" DevicePath \"\"" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.879046 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:34:20 crc kubenswrapper[4840]: I1209 17:34:20.879059 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.091508 4840 generic.go:334] "Generic (PLEG): container finished" podID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerID="4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4" exitCode=0 Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.091591 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerDied","Data":"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4"} Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.091648 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pts7d" event={"ID":"064e7a2d-30f6-4a56-85e6-824c8b2b5f2d","Type":"ContainerDied","Data":"f70e6f82cedf2f5e09a908ab5ff88cef346cde61c0953262f1da70e91ef86803"} Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.091685 4840 scope.go:117] "RemoveContainer" containerID="4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.091898 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pts7d" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.123314 4840 scope.go:117] "RemoveContainer" containerID="ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.130057 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.157764 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pts7d"] Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.159153 4840 scope.go:117] "RemoveContainer" containerID="66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.201409 4840 scope.go:117] "RemoveContainer" containerID="4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4" Dec 09 17:34:21 crc kubenswrapper[4840]: E1209 17:34:21.202027 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4\": container with ID starting with 4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4 not found: ID does not exist" containerID="4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.202095 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4"} err="failed to get container status \"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4\": rpc error: code = NotFound desc = could not find container \"4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4\": container with ID starting with 4f13fc91614fac5f4ed1077a3062790e2960d1d4335051902f8b180ac91a3af4 not found: ID does not exist" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.202120 4840 scope.go:117] "RemoveContainer" containerID="ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a" Dec 09 17:34:21 crc kubenswrapper[4840]: E1209 17:34:21.202657 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a\": container with ID starting with ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a not found: ID does not exist" containerID="ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.202681 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a"} err="failed to get container status \"ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a\": rpc error: code = NotFound desc = could not find container \"ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a\": container with ID starting with ea9d1b9dc8f213cf76dad5332ea9d4aba5b155755d2cd39d713da34466369e7a not found: ID does not exist" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.202705 4840 scope.go:117] "RemoveContainer" containerID="66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0" Dec 09 17:34:21 crc kubenswrapper[4840]: E1209 17:34:21.203016 4840 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0\": container with ID starting with 66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0 not found: ID does not exist" containerID="66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0" Dec 09 17:34:21 crc kubenswrapper[4840]: I1209 17:34:21.203047 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0"} err="failed to get container status \"66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0\": rpc error: code = NotFound desc = could not find container \"66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0\": container with ID starting with 66b8236d35b7c6d34f1b8296f5bb4d7ba6babff274639d21f228f95bd583f9c0 not found: ID does not exist" Dec 09 17:34:22 crc kubenswrapper[4840]: I1209 17:34:22.621723 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" path="/var/lib/kubelet/pods/064e7a2d-30f6-4a56-85e6-824c8b2b5f2d/volumes" Dec 09 17:34:25 crc kubenswrapper[4840]: E1209 17:34:25.612651 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:34:28 crc kubenswrapper[4840]: I1209 17:34:28.609470 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:34:28 crc kubenswrapper[4840]: E1209 17:34:28.610079 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:32 crc kubenswrapper[4840]: E1209 17:34:32.610619 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:34:36 crc kubenswrapper[4840]: E1209 17:34:36.611116 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:34:39 crc kubenswrapper[4840]: I1209 17:34:39.608196 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:34:39 crc kubenswrapper[4840]: E1209 17:34:39.608743 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:43 crc kubenswrapper[4840]: E1209 17:34:43.610824 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:34:50 crc kubenswrapper[4840]: E1209 17:34:50.611506 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:34:51 crc kubenswrapper[4840]: I1209 17:34:51.608704 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:34:51 crc kubenswrapper[4840]: E1209 17:34:51.609326 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:34:57 crc kubenswrapper[4840]: E1209 17:34:57.611113 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:35:04 crc kubenswrapper[4840]: E1209 17:35:04.617251 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:35:05 crc kubenswrapper[4840]: I1209 17:35:05.609065 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:35:05 crc kubenswrapper[4840]: E1209 17:35:05.609289 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:35:08 crc kubenswrapper[4840]: E1209 17:35:08.612096 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:35:15 crc kubenswrapper[4840]: E1209 17:35:15.612383 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:35:18 crc kubenswrapper[4840]: I1209 17:35:18.608612 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:35:18 crc kubenswrapper[4840]: E1209 17:35:18.609221 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:35:23 crc kubenswrapper[4840]: E1209 17:35:23.611468 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:35:26 crc kubenswrapper[4840]: E1209 17:35:26.612125 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:35:29 crc kubenswrapper[4840]: I1209 17:35:29.608549 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:35:29 crc kubenswrapper[4840]: E1209 17:35:29.609246 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:35:35 crc kubenswrapper[4840]: E1209 17:35:35.610657 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:35:40 crc kubenswrapper[4840]: E1209 17:35:40.622313 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:35:41 crc kubenswrapper[4840]: I1209 17:35:41.608552 4840 
scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:35:41 crc kubenswrapper[4840]: E1209 17:35:41.608866 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:35:46 crc kubenswrapper[4840]: E1209 17:35:46.610528 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:35:51 crc kubenswrapper[4840]: E1209 17:35:51.611696 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:35:56 crc kubenswrapper[4840]: I1209 17:35:56.608654 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:35:56 crc kubenswrapper[4840]: E1209 17:35:56.609585 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:35:57 crc kubenswrapper[4840]: E1209 17:35:57.611768 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:36:02 crc kubenswrapper[4840]: E1209 17:36:02.612361 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:36:08 crc kubenswrapper[4840]: E1209 17:36:08.611073 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:36:10 crc kubenswrapper[4840]: I1209 17:36:10.609204 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:36:10 crc kubenswrapper[4840]: E1209 
17:36:10.610026 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:36:14 crc kubenswrapper[4840]: E1209 17:36:14.618138 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:36:21 crc kubenswrapper[4840]: E1209 17:36:21.611914 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:36:23 crc kubenswrapper[4840]: I1209 17:36:23.608471 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:36:23 crc kubenswrapper[4840]: E1209 17:36:23.609052 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:36:26 crc kubenswrapper[4840]: E1209 17:36:26.612718 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:36:36 crc kubenswrapper[4840]: E1209 17:36:36.610476 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:36:37 crc kubenswrapper[4840]: I1209 17:36:37.609185 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:36:37 crc kubenswrapper[4840]: E1209 17:36:37.609805 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:36:41 crc kubenswrapper[4840]: E1209 17:36:41.611642 4840 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:36:48 crc kubenswrapper[4840]: E1209 17:36:48.613126 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:36:49 crc kubenswrapper[4840]: I1209 17:36:49.609033 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:36:49 crc kubenswrapper[4840]: E1209 17:36:49.609806 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:36:54 crc kubenswrapper[4840]: E1209 17:36:54.617754 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:37:00 crc kubenswrapper[4840]: E1209 17:37:00.610440 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:37:02 crc kubenswrapper[4840]: I1209 17:37:02.609383 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:37:02 crc kubenswrapper[4840]: E1209 17:37:02.609999 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:37:09 crc kubenswrapper[4840]: E1209 17:37:09.611403 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:37:13 crc kubenswrapper[4840]: I1209 17:37:13.608464 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:37:13 crc kubenswrapper[4840]: E1209 17:37:13.609357 4840 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:37:13 crc kubenswrapper[4840]: E1209 17:37:13.610644 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:37:24 crc kubenswrapper[4840]: E1209 17:37:24.616993 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:37:25 crc kubenswrapper[4840]: I1209 17:37:25.609258 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:37:25 crc kubenswrapper[4840]: E1209 17:37:25.609947 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:37:27 crc kubenswrapper[4840]: E1209 17:37:27.610494 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.678618 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:30 crc kubenswrapper[4840]: E1209 17:37:30.679411 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="extract-content" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.679428 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="extract-content" Dec 09 17:37:30 crc kubenswrapper[4840]: E1209 17:37:30.679445 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="registry-server" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.679452 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="registry-server" Dec 09 17:37:30 crc kubenswrapper[4840]: E1209 17:37:30.679461 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="extract-utilities" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.679470 4840 
state_mem.go:107] "Deleted CPUSet assignment" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="extract-utilities" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.679760 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="064e7a2d-30f6-4a56-85e6-824c8b2b5f2d" containerName="registry-server" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.681461 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.691505 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.738321 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.738530 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.738576 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdcmn\" (UniqueName: \"kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.841992 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.842537 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.842768 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.843090 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.843155 4840 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdcmn\" (UniqueName: \"kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:30 crc kubenswrapper[4840]: I1209 17:37:30.862441 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdcmn\" (UniqueName: \"kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn\") pod \"redhat-marketplace-8jqqw\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:31 crc kubenswrapper[4840]: I1209 17:37:31.028665 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:31 crc kubenswrapper[4840]: I1209 17:37:31.496206 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:31 crc kubenswrapper[4840]: I1209 17:37:31.522305 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerStarted","Data":"d7b92312f634dfcc8973debcf4166c27869be0093aee2da06309b5c2a2b80d2f"} Dec 09 17:37:32 crc kubenswrapper[4840]: I1209 17:37:32.533755 4840 generic.go:334] "Generic (PLEG): container finished" podID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerID="ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c" exitCode=0 Dec 09 17:37:32 crc kubenswrapper[4840]: I1209 17:37:32.533848 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerDied","Data":"ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c"} Dec 09 17:37:32 crc kubenswrapper[4840]: I1209 17:37:32.536609 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:37:34 crc kubenswrapper[4840]: I1209 17:37:34.553308 4840 generic.go:334] "Generic (PLEG): container finished" podID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerID="89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c" exitCode=0 Dec 09 17:37:34 crc kubenswrapper[4840]: I1209 17:37:34.553377 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerDied","Data":"89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c"} Dec 09 17:37:36 crc kubenswrapper[4840]: I1209 17:37:36.573250 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerStarted","Data":"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a"} Dec 09 17:37:36 crc kubenswrapper[4840]: I1209 17:37:36.596864 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8jqqw" podStartSLOduration=3.624924531 podStartE2EDuration="6.596846683s" podCreationTimestamp="2025-12-09 17:37:30 +0000 UTC" firstStartedPulling="2025-12-09 17:37:32.536363992 +0000 UTC m=+2438.527474645" lastFinishedPulling="2025-12-09 17:37:35.508286124 +0000 UTC m=+2441.499396797" 
observedRunningTime="2025-12-09 17:37:36.592101428 +0000 UTC m=+2442.583212061" watchObservedRunningTime="2025-12-09 17:37:36.596846683 +0000 UTC m=+2442.587957316" Dec 09 17:37:38 crc kubenswrapper[4840]: I1209 17:37:38.609727 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:37:38 crc kubenswrapper[4840]: E1209 17:37:38.610413 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:37:38 crc kubenswrapper[4840]: E1209 17:37:38.612042 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:37:40 crc kubenswrapper[4840]: E1209 17:37:40.715479 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:37:40 crc kubenswrapper[4840]: E1209 17:37:40.715831 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:37:40 crc kubenswrapper[4840]: E1209 17:37:40.716022 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:37:40 crc kubenswrapper[4840]: E1209 17:37:40.717272 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:37:41 crc kubenswrapper[4840]: I1209 17:37:41.029833 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:41 crc kubenswrapper[4840]: I1209 17:37:41.030230 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:41 crc kubenswrapper[4840]: I1209 17:37:41.114521 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:41 crc kubenswrapper[4840]: I1209 17:37:41.667199 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:41 crc kubenswrapper[4840]: I1209 17:37:41.720119 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:43 crc kubenswrapper[4840]: I1209 17:37:43.641860 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8jqqw" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="registry-server" containerID="cri-o://68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a" gracePeriod=2 Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.177212 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.270399 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdcmn\" (UniqueName: \"kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn\") pod \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.270482 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content\") pod \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.270747 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities\") pod \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\" (UID: \"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b\") " Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.271629 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities" (OuterVolumeSpecName: "utilities") pod "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" (UID: "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.275717 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn" (OuterVolumeSpecName: "kube-api-access-vdcmn") pod "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" (UID: "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b"). InnerVolumeSpecName "kube-api-access-vdcmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.311630 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" (UID: "d3687335-4d97-4c1d-b8ec-8b7e5f5e823b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.373242 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdcmn\" (UniqueName: \"kubernetes.io/projected/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-kube-api-access-vdcmn\") on node \"crc\" DevicePath \"\"" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.373281 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.373291 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.658000 4840 generic.go:334] "Generic (PLEG): container finished" podID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerID="68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a" exitCode=0 Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.658095 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerDied","Data":"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a"} Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.659113 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jqqw" event={"ID":"d3687335-4d97-4c1d-b8ec-8b7e5f5e823b","Type":"ContainerDied","Data":"d7b92312f634dfcc8973debcf4166c27869be0093aee2da06309b5c2a2b80d2f"} Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.658133 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jqqw" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.659220 4840 scope.go:117] "RemoveContainer" containerID="68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.695835 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.697193 4840 scope.go:117] "RemoveContainer" containerID="89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.705320 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jqqw"] Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.729486 4840 scope.go:117] "RemoveContainer" containerID="ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.770296 4840 scope.go:117] "RemoveContainer" containerID="68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a" Dec 09 17:37:44 crc kubenswrapper[4840]: E1209 17:37:44.770817 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a\": container with ID starting with 68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a not found: ID does not exist" containerID="68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.770850 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a"} err="failed to get container status \"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a\": rpc error: code = NotFound desc = could not find container \"68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a\": container with ID starting with 68ee85abb8d2685ec66413da3819963aed357dabd92cd67f5787d3b935adc89a not found: ID does not exist" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.770871 4840 scope.go:117] "RemoveContainer" containerID="89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c" Dec 09 17:37:44 crc kubenswrapper[4840]: E1209 17:37:44.771229 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c\": container with ID starting with 89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c not found: ID does not exist" containerID="89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.771248 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c"} err="failed to get container status \"89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c\": rpc error: code = NotFound desc = could not find container \"89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c\": container with ID starting with 89229f18e3d984e36d2eae2615f45f92b593526357211c7b7a78fa0be3c66b4c not found: ID does not exist" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.771262 4840 scope.go:117] "RemoveContainer" 
containerID="ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c" Dec 09 17:37:44 crc kubenswrapper[4840]: E1209 17:37:44.771466 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c\": container with ID starting with ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c not found: ID does not exist" containerID="ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c" Dec 09 17:37:44 crc kubenswrapper[4840]: I1209 17:37:44.771491 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c"} err="failed to get container status \"ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c\": rpc error: code = NotFound desc = could not find container \"ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c\": container with ID starting with ff08b8638ff6d5f7004019cde9ccdc68499b9a21d32701f07b38fc8a1c8afe7c not found: ID does not exist" Dec 09 17:37:46 crc kubenswrapper[4840]: I1209 17:37:46.621989 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" path="/var/lib/kubelet/pods/d3687335-4d97-4c1d-b8ec-8b7e5f5e823b/volumes" Dec 09 17:37:52 crc kubenswrapper[4840]: I1209 17:37:52.609498 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:37:52 crc kubenswrapper[4840]: E1209 17:37:52.610316 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:37:53 crc kubenswrapper[4840]: E1209 17:37:53.752524 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:37:53 crc kubenswrapper[4840]: E1209 17:37:53.752665 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
Dec 09 17:37:53 crc kubenswrapper[4840]: E1209 17:37:53.752840 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:37:53 crc kubenswrapper[4840]: E1209 17:37:53.754727 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:37:54 crc kubenswrapper[4840]: E1209 17:37:54.615484 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:38:07 crc kubenswrapper[4840]: I1209 17:38:07.609374 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:38:07 crc kubenswrapper[4840]: E1209 17:38:07.610290 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:38:07 crc kubenswrapper[4840]: E1209 17:38:07.613034 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:38:07 crc kubenswrapper[4840]: E1209 17:38:07.613259 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:38:18 crc kubenswrapper[4840]: E1209 17:38:18.610519 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:38:19 crc kubenswrapper[4840]: I1209 17:38:19.608739 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:38:19 crc kubenswrapper[4840]: E1209 17:38:19.609281 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:38:21 crc kubenswrapper[4840]: E1209 17:38:21.612051 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:38:30 crc kubenswrapper[4840]: I1209 17:38:30.609197 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:38:30 crc kubenswrapper[4840]: E1209 17:38:30.610163 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:38:31 crc kubenswrapper[4840]: E1209 17:38:31.613347 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:38:33 crc kubenswrapper[4840]: E1209 17:38:33.610840 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:38:41 crc kubenswrapper[4840]: I1209 17:38:41.608848 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:38:41 crc kubenswrapper[4840]: E1209 17:38:41.609708 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:38:42 crc kubenswrapper[4840]: E1209 17:38:42.610981 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:38:44 crc kubenswrapper[4840]: E1209 17:38:44.646224 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:38:53 crc kubenswrapper[4840]: I1209 17:38:53.609377 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:38:53 crc kubenswrapper[4840]: E1209 17:38:53.610400 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:38:55 crc kubenswrapper[4840]: E1209 17:38:55.612743 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:38:58 crc kubenswrapper[4840]: E1209 17:38:58.610695 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:39:04 crc kubenswrapper[4840]: I1209 17:39:04.614414 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d"
Dec 09 17:39:05 crc kubenswrapper[4840]: I1209 17:39:05.422108 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc"}
Dec 09 17:39:07 crc kubenswrapper[4840]: E1209 17:39:07.610510 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:39:11 crc kubenswrapper[4840]: E1209 17:39:11.611215 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:39:21 crc kubenswrapper[4840]: E1209 17:39:21.611588 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:39:25 crc kubenswrapper[4840]: E1209 17:39:25.610447 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:39:33 crc kubenswrapper[4840]: E1209 17:39:33.611879 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:39:37 crc kubenswrapper[4840]: E1209 17:39:37.609650 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:39:47 crc kubenswrapper[4840]: E1209 17:39:47.610634 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:39:51 crc kubenswrapper[4840]: E1209 17:39:51.610318 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:40:00 crc kubenswrapper[4840]: E1209 17:40:00.610841 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:40:03 crc kubenswrapper[4840]: E1209 17:40:03.609835 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:40:14 crc kubenswrapper[4840]: E1209 17:40:14.624491 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:40:15 crc kubenswrapper[4840]: I1209 17:40:15.107618 4840 generic.go:334] "Generic (PLEG): container finished" podID="7fb2365c-d487-44bb-8096-85400eb2f6ee" containerID="637e2b3b820cf8dedb9e591afba8e30c74c2408bcfdb5aa1e392f5cc4ff98035" exitCode=2
Dec 09 17:40:15 crc kubenswrapper[4840]: I1209 17:40:15.107707 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" event={"ID":"7fb2365c-d487-44bb-8096-85400eb2f6ee","Type":"ContainerDied","Data":"637e2b3b820cf8dedb9e591afba8e30c74c2408bcfdb5aa1e392f5cc4ff98035"}
Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.621248 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl"
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.746471 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key\") pod \"7fb2365c-d487-44bb-8096-85400eb2f6ee\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.746662 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory\") pod \"7fb2365c-d487-44bb-8096-85400eb2f6ee\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.746823 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4n8v\" (UniqueName: \"kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v\") pod \"7fb2365c-d487-44bb-8096-85400eb2f6ee\" (UID: \"7fb2365c-d487-44bb-8096-85400eb2f6ee\") " Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.752821 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v" (OuterVolumeSpecName: "kube-api-access-g4n8v") pod "7fb2365c-d487-44bb-8096-85400eb2f6ee" (UID: "7fb2365c-d487-44bb-8096-85400eb2f6ee"). InnerVolumeSpecName "kube-api-access-g4n8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.775212 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory" (OuterVolumeSpecName: "inventory") pod "7fb2365c-d487-44bb-8096-85400eb2f6ee" (UID: "7fb2365c-d487-44bb-8096-85400eb2f6ee"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.775226 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7fb2365c-d487-44bb-8096-85400eb2f6ee" (UID: "7fb2365c-d487-44bb-8096-85400eb2f6ee"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.848899 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.848929 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4n8v\" (UniqueName: \"kubernetes.io/projected/7fb2365c-d487-44bb-8096-85400eb2f6ee-kube-api-access-g4n8v\") on node \"crc\" DevicePath \"\"" Dec 09 17:40:16 crc kubenswrapper[4840]: I1209 17:40:16.848940 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7fb2365c-d487-44bb-8096-85400eb2f6ee-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 17:40:17 crc kubenswrapper[4840]: I1209 17:40:17.131283 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" event={"ID":"7fb2365c-d487-44bb-8096-85400eb2f6ee","Type":"ContainerDied","Data":"bcff211dd490f3817b9636ba07e0d4a96dedcf0707e15d2620306d7173dee732"} Dec 09 17:40:17 crc kubenswrapper[4840]: I1209 17:40:17.131326 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcff211dd490f3817b9636ba07e0d4a96dedcf0707e15d2620306d7173dee732" Dec 09 17:40:17 crc kubenswrapper[4840]: I1209 17:40:17.131367 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl" Dec 09 17:40:17 crc kubenswrapper[4840]: E1209 17:40:17.610789 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:40:29 crc kubenswrapper[4840]: E1209 17:40:29.610728 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:40:30 crc kubenswrapper[4840]: E1209 17:40:30.609957 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.032284 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr"] Dec 09 17:40:34 crc kubenswrapper[4840]: E1209 17:40:34.033444 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="extract-utilities" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033463 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="extract-utilities" Dec 09 17:40:34 crc kubenswrapper[4840]: E1209 17:40:34.033502 4840 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="extract-content" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033511 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="extract-content" Dec 09 17:40:34 crc kubenswrapper[4840]: E1209 17:40:34.033529 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="registry-server" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033536 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="registry-server" Dec 09 17:40:34 crc kubenswrapper[4840]: E1209 17:40:34.033549 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb2365c-d487-44bb-8096-85400eb2f6ee" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033557 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb2365c-d487-44bb-8096-85400eb2f6ee" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033827 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3687335-4d97-4c1d-b8ec-8b7e5f5e823b" containerName="registry-server" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.033847 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fb2365c-d487-44bb-8096-85400eb2f6ee" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.035144 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.039112 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.039119 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.040147 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.042752 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr"] Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.049232 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.107150 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.107469 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.107617 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b55d\" (UniqueName: \"kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.209493 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b55d\" (UniqueName: \"kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.209587 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.209691 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.216126 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.216126 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.226454 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b55d\" (UniqueName: \"kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.360223 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:40:34 crc kubenswrapper[4840]: I1209 17:40:34.959927 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr"] Dec 09 17:40:35 crc kubenswrapper[4840]: I1209 17:40:35.313066 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" event={"ID":"18b120a1-f1fb-4739-8c18-2a4380eb70e0","Type":"ContainerStarted","Data":"36ccfab1bf0baf3427ae331027cc3f2f8a6b32dcde98b9f4881df75e61f021a6"} Dec 09 17:40:36 crc kubenswrapper[4840]: I1209 17:40:36.321772 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" event={"ID":"18b120a1-f1fb-4739-8c18-2a4380eb70e0","Type":"ContainerStarted","Data":"3f713485b0100adfc3fb8d88ae034cbfbbdadaec8400d65de54e05d6277cea6e"} Dec 09 17:40:36 crc kubenswrapper[4840]: I1209 17:40:36.345722 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" podStartSLOduration=1.810431793 podStartE2EDuration="2.345704782s" podCreationTimestamp="2025-12-09 17:40:34 +0000 UTC" firstStartedPulling="2025-12-09 17:40:34.961325321 +0000 UTC m=+2620.952435954" lastFinishedPulling="2025-12-09 17:40:35.49659831 +0000 UTC m=+2621.487708943" observedRunningTime="2025-12-09 17:40:36.338600791 +0000 UTC m=+2622.329711434" watchObservedRunningTime="2025-12-09 17:40:36.345704782 +0000 UTC m=+2622.336815415" Dec 09 17:40:40 crc kubenswrapper[4840]: E1209 17:40:40.612575 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:40:44 crc kubenswrapper[4840]: E1209 17:40:44.615646 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:40:54 crc kubenswrapper[4840]: E1209 17:40:54.619757 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:40:59 crc kubenswrapper[4840]: E1209 17:40:59.610154 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:41:04 crc kubenswrapper[4840]: I1209 17:41:04.036606 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:41:04 crc kubenswrapper[4840]: I1209 17:41:04.037253 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:41:08 crc kubenswrapper[4840]: E1209 17:41:08.624032 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.258583 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.261575 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.284801 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.361492 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn57v\" (UniqueName: \"kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.361616 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.361676 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.463395 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn57v\" (UniqueName: \"kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.463813 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.463896 4840 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.464400 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.464538 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.482698 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn57v\" (UniqueName: \"kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v\") pod \"redhat-operators-vhjjb\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:13 crc kubenswrapper[4840]: I1209 17:41:13.598859 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:14 crc kubenswrapper[4840]: I1209 17:41:14.153928 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:14 crc kubenswrapper[4840]: W1209 17:41:14.156906 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24587b66_cd8b_40ca_84f9_9d70289b2a01.slice/crio-550d0819eb7818321be7949b903033e2822117b256c282635ca77536c466fdf5 WatchSource:0}: Error finding container 550d0819eb7818321be7949b903033e2822117b256c282635ca77536c466fdf5: Status 404 returned error can't find the container with id 550d0819eb7818321be7949b903033e2822117b256c282635ca77536c466fdf5 Dec 09 17:41:14 crc kubenswrapper[4840]: E1209 17:41:14.616222 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:41:14 crc kubenswrapper[4840]: I1209 17:41:14.848100 4840 generic.go:334] "Generic (PLEG): container finished" podID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerID="9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562" exitCode=0 Dec 09 17:41:14 crc kubenswrapper[4840]: I1209 17:41:14.848188 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerDied","Data":"9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562"} Dec 09 17:41:14 crc kubenswrapper[4840]: I1209 17:41:14.848572 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" 
event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerStarted","Data":"550d0819eb7818321be7949b903033e2822117b256c282635ca77536c466fdf5"} Dec 09 17:41:16 crc kubenswrapper[4840]: I1209 17:41:16.870308 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerStarted","Data":"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f"} Dec 09 17:41:18 crc kubenswrapper[4840]: I1209 17:41:18.887710 4840 generic.go:334] "Generic (PLEG): container finished" podID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerID="bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f" exitCode=0 Dec 09 17:41:18 crc kubenswrapper[4840]: I1209 17:41:18.887787 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerDied","Data":"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f"} Dec 09 17:41:19 crc kubenswrapper[4840]: I1209 17:41:19.903035 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerStarted","Data":"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8"} Dec 09 17:41:19 crc kubenswrapper[4840]: I1209 17:41:19.923255 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vhjjb" podStartSLOduration=2.398395881 podStartE2EDuration="6.923234524s" podCreationTimestamp="2025-12-09 17:41:13 +0000 UTC" firstStartedPulling="2025-12-09 17:41:14.851812972 +0000 UTC m=+2660.842923605" lastFinishedPulling="2025-12-09 17:41:19.376651615 +0000 UTC m=+2665.367762248" observedRunningTime="2025-12-09 17:41:19.922857614 +0000 UTC m=+2665.913968247" watchObservedRunningTime="2025-12-09 17:41:19.923234524 +0000 UTC m=+2665.914345157" Dec 09 17:41:20 crc kubenswrapper[4840]: E1209 17:41:20.612890 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:41:23 crc kubenswrapper[4840]: I1209 17:41:23.599843 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:23 crc kubenswrapper[4840]: I1209 17:41:23.600470 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:24 crc kubenswrapper[4840]: I1209 17:41:24.653901 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vhjjb" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="registry-server" probeResult="failure" output=< Dec 09 17:41:24 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:41:24 crc kubenswrapper[4840]: > Dec 09 17:41:27 crc kubenswrapper[4840]: E1209 17:41:27.611951 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:41:32 crc kubenswrapper[4840]: E1209 17:41:32.611365 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:41:33 crc kubenswrapper[4840]: I1209 17:41:33.660317 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:33 crc kubenswrapper[4840]: I1209 17:41:33.721814 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:33 crc kubenswrapper[4840]: I1209 17:41:33.906409 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:34 crc kubenswrapper[4840]: I1209 17:41:34.035949 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:41:34 crc kubenswrapper[4840]: I1209 17:41:34.036049 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.035864 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vhjjb" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="registry-server" containerID="cri-o://81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8" gracePeriod=2 Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.599220 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.730445 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content\") pod \"24587b66-cd8b-40ca-84f9-9d70289b2a01\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.730732 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities\") pod \"24587b66-cd8b-40ca-84f9-9d70289b2a01\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.730957 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn57v\" (UniqueName: \"kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v\") pod \"24587b66-cd8b-40ca-84f9-9d70289b2a01\" (UID: \"24587b66-cd8b-40ca-84f9-9d70289b2a01\") " Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.731390 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities" (OuterVolumeSpecName: "utilities") pod "24587b66-cd8b-40ca-84f9-9d70289b2a01" (UID: "24587b66-cd8b-40ca-84f9-9d70289b2a01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.732212 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.737566 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v" (OuterVolumeSpecName: "kube-api-access-jn57v") pod "24587b66-cd8b-40ca-84f9-9d70289b2a01" (UID: "24587b66-cd8b-40ca-84f9-9d70289b2a01"). InnerVolumeSpecName "kube-api-access-jn57v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.834731 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn57v\" (UniqueName: \"kubernetes.io/projected/24587b66-cd8b-40ca-84f9-9d70289b2a01-kube-api-access-jn57v\") on node \"crc\" DevicePath \"\"" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.852209 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24587b66-cd8b-40ca-84f9-9d70289b2a01" (UID: "24587b66-cd8b-40ca-84f9-9d70289b2a01"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:41:35 crc kubenswrapper[4840]: I1209 17:41:35.935976 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24587b66-cd8b-40ca-84f9-9d70289b2a01-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.046553 4840 generic.go:334] "Generic (PLEG): container finished" podID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerID="81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8" exitCode=0 Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.046599 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerDied","Data":"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8"} Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.046610 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vhjjb" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.046625 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vhjjb" event={"ID":"24587b66-cd8b-40ca-84f9-9d70289b2a01","Type":"ContainerDied","Data":"550d0819eb7818321be7949b903033e2822117b256c282635ca77536c466fdf5"} Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.046640 4840 scope.go:117] "RemoveContainer" containerID="81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.090979 4840 scope.go:117] "RemoveContainer" containerID="bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.096327 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.104712 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vhjjb"] Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.130157 4840 scope.go:117] "RemoveContainer" containerID="9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.188191 4840 scope.go:117] "RemoveContainer" containerID="81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8" Dec 09 17:41:36 crc kubenswrapper[4840]: E1209 17:41:36.188523 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8\": container with ID starting with 81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8 not found: ID does not exist" containerID="81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.188643 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8"} err="failed to get container status \"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8\": rpc error: code = NotFound desc = could not find container \"81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8\": container with ID starting with 81e750e488d9b061e517ccb84f217a1b20abf7b17720a789d7f5aed4128c9db8 not found: ID does not exist" Dec 09 17:41:36 crc 
kubenswrapper[4840]: I1209 17:41:36.189247 4840 scope.go:117] "RemoveContainer" containerID="bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f" Dec 09 17:41:36 crc kubenswrapper[4840]: E1209 17:41:36.189640 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f\": container with ID starting with bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f not found: ID does not exist" containerID="bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.189680 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f"} err="failed to get container status \"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f\": rpc error: code = NotFound desc = could not find container \"bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f\": container with ID starting with bd235f35fdd7131f6f1e218b3f77db990a81520fc2843dce123f0d7b26b13a9f not found: ID does not exist" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.189708 4840 scope.go:117] "RemoveContainer" containerID="9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562" Dec 09 17:41:36 crc kubenswrapper[4840]: E1209 17:41:36.190761 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562\": container with ID starting with 9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562 not found: ID does not exist" containerID="9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.190789 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562"} err="failed to get container status \"9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562\": rpc error: code = NotFound desc = could not find container \"9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562\": container with ID starting with 9354f297ed62af9e94496b16b5148f47cab89f0614683cd3355cb1c3005e4562 not found: ID does not exist" Dec 09 17:41:36 crc kubenswrapper[4840]: I1209 17:41:36.620493 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" path="/var/lib/kubelet/pods/24587b66-cd8b-40ca-84f9-9d70289b2a01/volumes" Dec 09 17:41:39 crc kubenswrapper[4840]: E1209 17:41:39.611015 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:41:45 crc kubenswrapper[4840]: E1209 17:41:45.612060 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:41:47 crc 
kubenswrapper[4840]: I1209 17:41:47.213326 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:41:47 crc kubenswrapper[4840]: E1209 17:41:47.214776 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="extract-utilities" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.214865 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="extract-utilities" Dec 09 17:41:47 crc kubenswrapper[4840]: E1209 17:41:47.214945 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="registry-server" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.215044 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="registry-server" Dec 09 17:41:47 crc kubenswrapper[4840]: E1209 17:41:47.215168 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="extract-content" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.215250 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="extract-content" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.215589 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="24587b66-cd8b-40ca-84f9-9d70289b2a01" containerName="registry-server" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.217483 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.222366 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.339825 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.340343 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.340373 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbgd2\" (UniqueName: \"kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.442907 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 
17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.442980 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbgd2\" (UniqueName: \"kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.443126 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.443406 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.443547 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.462406 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbgd2\" (UniqueName: \"kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2\") pod \"certified-operators-9xms4\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:47 crc kubenswrapper[4840]: I1209 17:41:47.539990 4840 util.go:30] "No sandbox for pod can be found. 
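
The reconciler_common.go and operation_generator.go entries above trace kubelet's volume manager pattern: a volume in the desired state of world is first verified (VerifyControllerAttachedVolume), then mounted (MountVolume.SetUp); teardown later runs the reverse path (UnmountVolume.TearDown, then "Volume detached"). A minimal stdlib Go sketch of that desired-vs-actual reconcile loop follows; every name here is invented for illustration, this is not kubelet's actual reconciler code.

    // reconcile_sketch.go: toy desired-state vs. actual-state volume reconciler.
    // Illustrative only; kubelet's real reconciler lives under
    // pkg/kubelet/volumemanager/reconciler and is far more involved.
    package main

    import "fmt"

    type volume struct{ name, plugin string }

    func reconcile(desired, actual map[string]volume) {
    	// Mount what is desired but not yet actual
    	// (VerifyControllerAttachedVolume -> MountVolume.SetUp in the log).
    	for name, v := range desired {
    		if _, ok := actual[name]; !ok {
    			fmt.Printf("MountVolume.SetUp succeeded for volume %q (plugin %s)\n", name, v.plugin)
    			actual[name] = v
    		}
    	}
    	// Unmount what is actual but no longer desired
    	// (UnmountVolume.TearDown -> "Volume detached" in the log).
    	for name := range actual {
    		if _, ok := desired[name]; !ok {
    			fmt.Printf("Volume detached for volume %q\n", name)
    			delete(actual, name)
    		}
    	}
    }

    func main() {
    	desired := map[string]volume{
    		"catalog-content": {"catalog-content", "kubernetes.io/empty-dir"},
    		"utilities":       {"utilities", "kubernetes.io/empty-dir"},
    		"kube-api-access": {"kube-api-access", "kubernetes.io/projected"},
    	}
    	actual := map[string]volume{}
    	reconcile(desired, actual)             // pod admitted: everything mounted
    	reconcile(map[string]volume{}, actual) // pod deleted: everything torn down
    }

The same three volumes (catalog-content, utilities, kube-api-access-*) cycle through exactly this mount/unmount sequence for each marketplace catalog pod in this log.
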
Need to start a new one" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:48 crc kubenswrapper[4840]: I1209 17:41:48.038128 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:41:48 crc kubenswrapper[4840]: I1209 17:41:48.154446 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerStarted","Data":"86b013cc0fbc1b2d6b7d6a83b3b54bedc2ac0f9bec755cf4e78b8ca4b5cdfca7"} Dec 09 17:41:49 crc kubenswrapper[4840]: I1209 17:41:49.166185 4840 generic.go:334] "Generic (PLEG): container finished" podID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerID="13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407" exitCode=0 Dec 09 17:41:49 crc kubenswrapper[4840]: I1209 17:41:49.166253 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerDied","Data":"13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407"} Dec 09 17:41:51 crc kubenswrapper[4840]: I1209 17:41:51.188337 4840 generic.go:334] "Generic (PLEG): container finished" podID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerID="99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13" exitCode=0 Dec 09 17:41:51 crc kubenswrapper[4840]: I1209 17:41:51.188955 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerDied","Data":"99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13"} Dec 09 17:41:53 crc kubenswrapper[4840]: I1209 17:41:53.219526 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerStarted","Data":"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2"} Dec 09 17:41:53 crc kubenswrapper[4840]: I1209 17:41:53.243335 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9xms4" podStartSLOduration=3.2475199630000002 podStartE2EDuration="6.24331732s" podCreationTimestamp="2025-12-09 17:41:47 +0000 UTC" firstStartedPulling="2025-12-09 17:41:49.169479373 +0000 UTC m=+2695.160589996" lastFinishedPulling="2025-12-09 17:41:52.16527668 +0000 UTC m=+2698.156387353" observedRunningTime="2025-12-09 17:41:53.237342081 +0000 UTC m=+2699.228452744" watchObservedRunningTime="2025-12-09 17:41:53.24331732 +0000 UTC m=+2699.234427953" Dec 09 17:41:54 crc kubenswrapper[4840]: E1209 17:41:54.620327 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:41:57 crc kubenswrapper[4840]: I1209 17:41:57.540903 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:57 crc kubenswrapper[4840]: I1209 17:41:57.541541 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:57 crc kubenswrapper[4840]: I1209 17:41:57.604387 4840 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:58 crc kubenswrapper[4840]: I1209 17:41:58.317997 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:41:58 crc kubenswrapper[4840]: I1209 17:41:58.379038 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:41:58 crc kubenswrapper[4840]: E1209 17:41:58.611522 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:00 crc kubenswrapper[4840]: I1209 17:42:00.284834 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9xms4" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="registry-server" containerID="cri-o://5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2" gracePeriod=2 Dec 09 17:42:00 crc kubenswrapper[4840]: I1209 17:42:00.834381 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.017410 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities\") pod \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.017500 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbgd2\" (UniqueName: \"kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2\") pod \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.017807 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content\") pod \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\" (UID: \"ba95dd47-a93f-47d2-a9d6-12b4786328f5\") " Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.019750 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities" (OuterVolumeSpecName: "utilities") pod "ba95dd47-a93f-47d2-a9d6-12b4786328f5" (UID: "ba95dd47-a93f-47d2-a9d6-12b4786328f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.025712 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2" (OuterVolumeSpecName: "kube-api-access-xbgd2") pod "ba95dd47-a93f-47d2-a9d6-12b4786328f5" (UID: "ba95dd47-a93f-47d2-a9d6-12b4786328f5"). InnerVolumeSpecName "kube-api-access-xbgd2". 
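
The pod_startup_latency_tracker entry at 17:41:53 above reports podStartE2EDuration=6.24331732s but podStartSLOduration≈3.2475s; the difference is the image-pull window (lastFinishedPulling minus firstStartedPulling, about 2.9958s), consistent with the SLO metric excluding image-pull time. A small Go check of that arithmetic, using the four timestamps copied from the log line:

    // startup_latency.go: re-derive the two durations from the 17:41:53 entry.
    package main

    import (
    	"fmt"
    	"time"
    )

    func mustParse(s string) time.Time {
    	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2025-12-09 17:41:47 +0000 UTC")
    	firstPull := mustParse("2025-12-09 17:41:49.169479373 +0000 UTC")
    	lastPull := mustParse("2025-12-09 17:41:52.16527668 +0000 UTC")
    	running := mustParse("2025-12-09 17:41:53.24331732 +0000 UTC")

    	e2e := running.Sub(created)     // full wall-clock startup: 6.24331732s
    	pull := lastPull.Sub(firstPull) // time spent pulling images: ~2.9958s
    	// e2e-pull ~= podStartSLOduration (3.2475s); the last few digits differ
    	// from the logged value because kubelet subtracts monotonic readings
    	// (the m=+... suffixes), not wall-clock timestamps.
    	fmt.Println(e2e, pull, e2e-pull)
    }
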
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.120276 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.120305 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbgd2\" (UniqueName: \"kubernetes.io/projected/ba95dd47-a93f-47d2-a9d6-12b4786328f5-kube-api-access-xbgd2\") on node \"crc\" DevicePath \"\"" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.138576 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba95dd47-a93f-47d2-a9d6-12b4786328f5" (UID: "ba95dd47-a93f-47d2-a9d6-12b4786328f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.222120 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba95dd47-a93f-47d2-a9d6-12b4786328f5-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.294913 4840 generic.go:334] "Generic (PLEG): container finished" podID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerID="5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2" exitCode=0 Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.294978 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerDied","Data":"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2"} Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.295017 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xms4" event={"ID":"ba95dd47-a93f-47d2-a9d6-12b4786328f5","Type":"ContainerDied","Data":"86b013cc0fbc1b2d6b7d6a83b3b54bedc2ac0f9bec755cf4e78b8ca4b5cdfca7"} Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.295019 4840 util.go:48] "No ready sandbox for pod can be found. 
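
Each pod teardown in this log ends with the same benign race, first visible at 17:41:36 for redhat-operators-vhjjb: one worker removes a container, another then asks CRI-O for its status, and the runtime answers gRPC NotFound, which kubelet logs ("DeleteContainer returned error") and ignores. The usual client-side treatment looks like the sketch below; the status/codes inspection is the real grpc-go API, while the runtime call itself is faked here for a self-contained example.

    // notfound_race.go: treat gRPC NotFound from a status query as "already gone".
    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // containerStatus stands in for a CRI ContainerStatus RPC against a
    // container that a racing worker has already removed.
    func containerStatus(id string) error {
    	return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    func main() {
    	err := containerStatus("81e750e488d9...")
    	if status.Code(err) == codes.NotFound {
    		// Deletion already happened elsewhere; log and move on,
    		// exactly as pod_container_deletor.go does above.
    		fmt.Println("container already deleted, ignoring:", err)
    		return
    	}
    	if err != nil {
    		fmt.Println("real failure:", err)
    	}
    }
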
Need to start a new one" pod="openshift-marketplace/certified-operators-9xms4" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.295042 4840 scope.go:117] "RemoveContainer" containerID="5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.329515 4840 scope.go:117] "RemoveContainer" containerID="99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.343362 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.355048 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9xms4"] Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.372972 4840 scope.go:117] "RemoveContainer" containerID="13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.400440 4840 scope.go:117] "RemoveContainer" containerID="5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2" Dec 09 17:42:01 crc kubenswrapper[4840]: E1209 17:42:01.400895 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2\": container with ID starting with 5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2 not found: ID does not exist" containerID="5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.401014 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2"} err="failed to get container status \"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2\": rpc error: code = NotFound desc = could not find container \"5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2\": container with ID starting with 5b392766d808dbd43ff33e2e1a1aa41a57c8fca3b365ecabb0537fb94313c6e2 not found: ID does not exist" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.401126 4840 scope.go:117] "RemoveContainer" containerID="99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13" Dec 09 17:42:01 crc kubenswrapper[4840]: E1209 17:42:01.401658 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13\": container with ID starting with 99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13 not found: ID does not exist" containerID="99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.401713 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13"} err="failed to get container status \"99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13\": rpc error: code = NotFound desc = could not find container \"99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13\": container with ID starting with 99200347e4dd76e7802b555ad800dbda5a4be3e40a088951adbd3f0c6a695b13 not found: ID does not exist" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.401741 4840 scope.go:117] "RemoveContainer" 
containerID="13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407" Dec 09 17:42:01 crc kubenswrapper[4840]: E1209 17:42:01.402036 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407\": container with ID starting with 13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407 not found: ID does not exist" containerID="13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407" Dec 09 17:42:01 crc kubenswrapper[4840]: I1209 17:42:01.402139 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407"} err="failed to get container status \"13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407\": rpc error: code = NotFound desc = could not find container \"13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407\": container with ID starting with 13788074e76b4965f82ba295813ba2e94c399e3442acc5b6c83182b9c1a20407 not found: ID does not exist" Dec 09 17:42:02 crc kubenswrapper[4840]: I1209 17:42:02.629725 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" path="/var/lib/kubelet/pods/ba95dd47-a93f-47d2-a9d6-12b4786328f5/volumes" Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.035685 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.036031 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.036068 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.036827 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.036886 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc" gracePeriod=600 Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.326997 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc" exitCode=0 Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.327066 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc"} Dec 09 17:42:04 crc kubenswrapper[4840]: I1209 17:42:04.327321 4840 scope.go:117] "RemoveContainer" containerID="b15ecd794a2caf8db6a1fe3495671fef74ec2b1048ecae0855aca41b4bc2180d" Dec 09 17:42:05 crc kubenswrapper[4840]: I1209 17:42:05.338707 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"} Dec 09 17:42:09 crc kubenswrapper[4840]: E1209 17:42:09.617188 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:09 crc kubenswrapper[4840]: E1209 17:42:09.619206 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:42:20 crc kubenswrapper[4840]: E1209 17:42:20.611447 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:23 crc kubenswrapper[4840]: E1209 17:42:23.610509 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:42:31 crc kubenswrapper[4840]: E1209 17:42:31.610855 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:34 crc kubenswrapper[4840]: E1209 17:42:34.620660 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:42:45 crc kubenswrapper[4840]: E1209 17:42:45.611314 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:47 crc kubenswrapper[4840]: I1209 17:42:47.610051 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:42:47 crc kubenswrapper[4840]: E1209 17:42:47.724990 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:42:47 crc kubenswrapper[4840]: E1209 17:42:47.725052 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:42:47 crc kubenswrapper[4840]: E1209 17:42:47.725209 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOption
s:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:42:47 crc kubenswrapper[4840]: E1209 17:42:47.726409 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:42:56 crc kubenswrapper[4840]: E1209 17:42:56.696857 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:42:56 crc kubenswrapper[4840]: E1209 17:42:56.697890 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
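
The "Tag current-tested was deleted or has expired. To pull, revive via time machine" text in the dump above is the registry's own error (Quay expires tags), passed verbatim through CRI-O and kubelet. The failure can be reproduced outside the cluster against the OCI distribution API with a plain HTTP probe; the sketch below assumes the repository allows anonymous pulls (a 401 response would mean a bearer-token handshake is needed first).

    // tagprobe.go: HEAD the manifest for a tag via the OCI distribution API.
    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	url := "https://quay.rdoproject.org/v2/podified-master-centos10/openstack-cloudkitty-api/manifests/current-tested"
    	req, err := http.NewRequest(http.MethodHead, url, nil)
    	if err != nil {
    		panic(err)
    	}
    	// Advertise the manifest media types a real image pull would request.
    	req.Header.Set("Accept",
    		"application/vnd.docker.distribution.manifest.v2+json, "+
    			"application/vnd.oci.image.manifest.v1+json, "+
    			"application/vnd.oci.image.index.v1+json")
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	// 200: tag exists; 404: deleted/expired (this log's case); 401: auth needed.
    	fmt.Println(resp.Status)
    }
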
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:42:56 crc kubenswrapper[4840]: E1209 17:42:56.698071 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:42:56 crc kubenswrapper[4840]: E1209 17:42:56.699698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
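
Note the cadence: the full ErrImagePull dumps (17:42:47 for cloudkitty, 17:42:56 for ceilometer) are the actual pull attempts, while the E1209 "Error syncing pod" lines every ten to fifteen seconds in between are pod-sync workers merely reporting the standing ImagePullBackOff. Kubelet retries the pull itself on a doubling backoff, commonly 10s initial with a 5m cap, which is consistent with the roughly five-minute spacing of the dumps in this log. A stdlib sketch of that capped doubling (illustrative, not kubelet's flowcontrol code):

    // backoff_sketch.go: capped exponential backoff, as applied to image pulls.
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const initial = 10 * time.Second
    	const maxDelay = 5 * time.Minute // assumed cap, matching kubelet defaults
    	next := initial
    	var elapsed time.Duration
    	for i := 0; i < 8; i++ {
    		fmt.Printf("attempt %d at t+%v (next retry in %v)\n", i+1, elapsed, next)
    		elapsed += next
    		next *= 2
    		if next > maxDelay {
    			next = maxDelay
    		}
    	}
    }
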
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:42:59 crc kubenswrapper[4840]: E1209 17:42:59.612309 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:43:10 crc kubenswrapper[4840]: E1209 17:43:10.613392 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:43:11 crc kubenswrapper[4840]: E1209 17:43:11.610245 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:43:21 crc kubenswrapper[4840]: E1209 17:43:21.609808 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:43:22 crc kubenswrapper[4840]: E1209 17:43:22.609914 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:43:34 crc kubenswrapper[4840]: E1209 17:43:34.618253 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:43:34 crc kubenswrapper[4840]: E1209 17:43:34.618326 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:43:46 crc kubenswrapper[4840]: E1209 17:43:46.612729 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:43:47 crc kubenswrapper[4840]: E1209 17:43:47.610995 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:00 crc kubenswrapper[4840]: E1209 17:44:00.611483 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:44:02 crc kubenswrapper[4840]: E1209 17:44:02.611095 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:04 crc kubenswrapper[4840]: I1209 17:44:04.036673 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:44:04 crc kubenswrapper[4840]: I1209 17:44:04.037531 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:44:12 crc kubenswrapper[4840]: E1209 17:44:12.610616 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.636163 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:13 crc kubenswrapper[4840]: E1209 17:44:13.638026 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="extract-content" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.638215 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="extract-content" Dec 09 17:44:13 crc kubenswrapper[4840]: E1209 17:44:13.638342 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="registry-server" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.638467 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="registry-server" Dec 09 17:44:13 crc kubenswrapper[4840]: E1209 17:44:13.639493 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="extract-utilities" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.639637 4840 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="extract-utilities" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.640208 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba95dd47-a93f-47d2-a9d6-12b4786328f5" containerName="registry-server" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.652349 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.660489 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.703049 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.705495 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc2dc\" (UniqueName: \"kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.705744 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.809384 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.809526 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.809701 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc2dc\" (UniqueName: \"kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.809948 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.810053 4840 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.832795 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc2dc\" (UniqueName: \"kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc\") pod \"community-operators-s84kq\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:13 crc kubenswrapper[4840]: I1209 17:44:13.989646 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:14 crc kubenswrapper[4840]: I1209 17:44:14.509327 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:14 crc kubenswrapper[4840]: I1209 17:44:14.622405 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerStarted","Data":"1921b495c5ab471ea3be68348d3d1de8bd68c354cf709cd4936ddb19586c725a"} Dec 09 17:44:15 crc kubenswrapper[4840]: E1209 17:44:15.610273 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:15 crc kubenswrapper[4840]: I1209 17:44:15.632123 4840 generic.go:334] "Generic (PLEG): container finished" podID="0de3767c-d385-4bf3-b256-525c951316d8" containerID="bbc752e06e41a1ef4c619c4e7ca6620dc87bee5aa25def42f5240095b090318f" exitCode=0 Dec 09 17:44:15 crc kubenswrapper[4840]: I1209 17:44:15.632172 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerDied","Data":"bbc752e06e41a1ef4c619c4e7ca6620dc87bee5aa25def42f5240095b090318f"} Dec 09 17:44:17 crc kubenswrapper[4840]: I1209 17:44:17.649855 4840 generic.go:334] "Generic (PLEG): container finished" podID="0de3767c-d385-4bf3-b256-525c951316d8" containerID="635713850be847e9dd2c799bc07dcc2219439146ecd8d6e360f57569386a9abd" exitCode=0 Dec 09 17:44:17 crc kubenswrapper[4840]: I1209 17:44:17.649902 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerDied","Data":"635713850be847e9dd2c799bc07dcc2219439146ecd8d6e360f57569386a9abd"} Dec 09 17:44:19 crc kubenswrapper[4840]: I1209 17:44:19.678750 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerStarted","Data":"c576bfba35196e75499075764f950d871eadfda25aeeb8a974f25ca4764bd111"} Dec 09 17:44:19 crc kubenswrapper[4840]: I1209 17:44:19.702424 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s84kq" podStartSLOduration=3.060045935 podStartE2EDuration="6.702404661s" 
podCreationTimestamp="2025-12-09 17:44:13 +0000 UTC" firstStartedPulling="2025-12-09 17:44:15.633306558 +0000 UTC m=+2841.624417191" lastFinishedPulling="2025-12-09 17:44:19.275665274 +0000 UTC m=+2845.266775917" observedRunningTime="2025-12-09 17:44:19.701691851 +0000 UTC m=+2845.692802484" watchObservedRunningTime="2025-12-09 17:44:19.702404661 +0000 UTC m=+2845.693515294" Dec 09 17:44:23 crc kubenswrapper[4840]: E1209 17:44:23.612162 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:44:23 crc kubenswrapper[4840]: I1209 17:44:23.990705 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:23 crc kubenswrapper[4840]: I1209 17:44:23.990835 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:24 crc kubenswrapper[4840]: I1209 17:44:24.034655 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:24 crc kubenswrapper[4840]: I1209 17:44:24.763590 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:24 crc kubenswrapper[4840]: I1209 17:44:24.810326 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:26 crc kubenswrapper[4840]: I1209 17:44:26.752489 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s84kq" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="registry-server" containerID="cri-o://c576bfba35196e75499075764f950d871eadfda25aeeb8a974f25ca4764bd111" gracePeriod=2 Dec 09 17:44:27 crc kubenswrapper[4840]: I1209 17:44:27.769295 4840 generic.go:334] "Generic (PLEG): container finished" podID="0de3767c-d385-4bf3-b256-525c951316d8" containerID="c576bfba35196e75499075764f950d871eadfda25aeeb8a974f25ca4764bd111" exitCode=0 Dec 09 17:44:27 crc kubenswrapper[4840]: I1209 17:44:27.769389 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerDied","Data":"c576bfba35196e75499075764f950d871eadfda25aeeb8a974f25ca4764bd111"} Dec 09 17:44:27 crc kubenswrapper[4840]: I1209 17:44:27.770104 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s84kq" event={"ID":"0de3767c-d385-4bf3-b256-525c951316d8","Type":"ContainerDied","Data":"1921b495c5ab471ea3be68348d3d1de8bd68c354cf709cd4936ddb19586c725a"} Dec 09 17:44:27 crc kubenswrapper[4840]: I1209 17:44:27.770131 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1921b495c5ab471ea3be68348d3d1de8bd68c354cf709cd4936ddb19586c725a" Dec 09 17:44:27 crc kubenswrapper[4840]: I1209 17:44:27.815784 4840 util.go:48] "No ready sandbox for pod can be found. 
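
"Killing container with a grace period" (gracePeriod=2 for registry-server at 17:44:26 above, gracePeriod=600 for machine-config-daemon at 17:42:04) means SIGTERM first, then SIGKILL if the process outlives the grace window. A self-contained Unix sketch of that escalation, with a sleep process standing in for the container:

    // grace_kill.go: SIGTERM, then SIGKILL after a grace period (Unix only).
    package main

    import (
    	"fmt"
    	"os/exec"
    	"syscall"
    	"time"
    )

    func main() {
    	cmd := exec.Command("sleep", "60") // stand-in for the container process
    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	grace := 2 * time.Second // gracePeriod=2, as in the log line above
    	_ = cmd.Process.Signal(syscall.SIGTERM)
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()
    	select {
    	case err := <-done:
    		// sleep exits on SIGTERM, so this fast path fires here; a process
    		// that traps or ignores TERM would fall through to the Kill branch.
    		fmt.Println("exited within grace period:", err)
    	case <-time.After(grace):
    		_ = cmd.Process.Kill() // escalate to SIGKILL
    		fmt.Println("grace period expired, killed:", <-done)
    	}
    }
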
Need to start a new one" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.017319 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities\") pod \"0de3767c-d385-4bf3-b256-525c951316d8\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.017416 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content\") pod \"0de3767c-d385-4bf3-b256-525c951316d8\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.017489 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc2dc\" (UniqueName: \"kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc\") pod \"0de3767c-d385-4bf3-b256-525c951316d8\" (UID: \"0de3767c-d385-4bf3-b256-525c951316d8\") " Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.018777 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities" (OuterVolumeSpecName: "utilities") pod "0de3767c-d385-4bf3-b256-525c951316d8" (UID: "0de3767c-d385-4bf3-b256-525c951316d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.034274 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc" (OuterVolumeSpecName: "kube-api-access-nc2dc") pod "0de3767c-d385-4bf3-b256-525c951316d8" (UID: "0de3767c-d385-4bf3-b256-525c951316d8"). InnerVolumeSpecName "kube-api-access-nc2dc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.068183 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0de3767c-d385-4bf3-b256-525c951316d8" (UID: "0de3767c-d385-4bf3-b256-525c951316d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.119650 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.119684 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc2dc\" (UniqueName: \"kubernetes.io/projected/0de3767c-d385-4bf3-b256-525c951316d8-kube-api-access-nc2dc\") on node \"crc\" DevicePath \"\"" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.119694 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de3767c-d385-4bf3-b256-525c951316d8-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.778145 4840 util.go:48] "No ready sandbox for pod can be found. 
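
The machine-config-daemon liveness failures recurring in this log (17:42:04, 17:44:04, 17:44:34) are plain HTTP GETs against 127.0.0.1:8798/health hitting connection-refused; once consecutive failures reach the probe's failureThreshold, kubelet kills and restarts the container, as it did at 17:42:04. A minimal prober loop in the same spirit; the endpoint is taken from the log, while the 10s period and threshold of 3 are the Kubernetes defaults, since the pod's actual probe spec is not in this log.

    // probe_sketch.go: HTTP liveness probing with a consecutive-failure threshold.
    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	const url = "http://127.0.0.1:8798/health" // probed endpoint from the log
    	const failureThreshold = 3                 // Kubernetes default
    	failures := 0
    	for range time.Tick(10 * time.Second) {    // default periodSeconds
    		resp, err := http.Get(url)
    		if err == nil && resp.StatusCode < 400 {
    			resp.Body.Close()
    			failures = 0 // healthy response resets the counter
    			continue
    		}
    		if err == nil {
    			resp.Body.Close()
    		}
    		failures++
    		fmt.Printf("probe failed (%d/%d): %v\n", failures, failureThreshold, err)
    		if failures >= failureThreshold {
    			fmt.Println("liveness threshold hit: container would be killed and restarted")
    			failures = 0
    		}
    	}
    }
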
Need to start a new one" pod="openshift-marketplace/community-operators-s84kq" Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.802467 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:28 crc kubenswrapper[4840]: I1209 17:44:28.810742 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s84kq"] Dec 09 17:44:30 crc kubenswrapper[4840]: E1209 17:44:30.612385 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:30 crc kubenswrapper[4840]: I1209 17:44:30.620431 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de3767c-d385-4bf3-b256-525c951316d8" path="/var/lib/kubelet/pods/0de3767c-d385-4bf3-b256-525c951316d8/volumes" Dec 09 17:44:34 crc kubenswrapper[4840]: I1209 17:44:34.036649 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:44:34 crc kubenswrapper[4840]: I1209 17:44:34.037063 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:44:35 crc kubenswrapper[4840]: E1209 17:44:35.612050 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:44:43 crc kubenswrapper[4840]: E1209 17:44:43.612880 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:48 crc kubenswrapper[4840]: E1209 17:44:48.611201 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:44:55 crc kubenswrapper[4840]: E1209 17:44:55.610720 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:44:59 crc kubenswrapper[4840]: E1209 17:44:59.611097 4840 
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.151745 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"]
Dec 09 17:45:00 crc kubenswrapper[4840]: E1209 17:45:00.152859 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="extract-utilities"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.153003 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="extract-utilities"
Dec 09 17:45:00 crc kubenswrapper[4840]: E1209 17:45:00.153118 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="extract-content"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.153211 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="extract-content"
Dec 09 17:45:00 crc kubenswrapper[4840]: E1209 17:45:00.153359 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="registry-server"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.153442 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="registry-server"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.153813 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de3767c-d385-4bf3-b256-525c951316d8" containerName="registry-server"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.154845 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.157566 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.161059 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.161153 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"]
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.251158 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npht8\" (UniqueName: \"kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.251244 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.251540 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.353515 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.353629 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npht8\" (UniqueName: \"kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.353690 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.354494 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.371430 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npht8\" (UniqueName: \"kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.371472 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume\") pod \"collect-profiles-29421705-6c2nn\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:00 crc kubenswrapper[4840]: I1209 17:45:00.485718 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:01 crc kubenswrapper[4840]: I1209 17:45:01.000210 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"]
Dec 09 17:45:01 crc kubenswrapper[4840]: I1209 17:45:01.114662 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn" event={"ID":"b7081818-31e8-4020-99eb-f658ed9adc9f","Type":"ContainerStarted","Data":"a1f17a403bc6823c273bcb7aecebcbf3f8daa35c13da75ff935cd9241618dc06"}
Dec 09 17:45:02 crc kubenswrapper[4840]: I1209 17:45:02.129690 4840 generic.go:334] "Generic (PLEG): container finished" podID="b7081818-31e8-4020-99eb-f658ed9adc9f" containerID="30099bf8ce7949efa21639a62d1a1e5ff585993db4000056f8f156adb6cb7499" exitCode=0
Dec 09 17:45:02 crc kubenswrapper[4840]: I1209 17:45:02.130040 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn" event={"ID":"b7081818-31e8-4020-99eb-f658ed9adc9f","Type":"ContainerDied","Data":"30099bf8ce7949efa21639a62d1a1e5ff585993db4000056f8f156adb6cb7499"}
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.605905 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.721409 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume\") pod \"b7081818-31e8-4020-99eb-f658ed9adc9f\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") "
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.721559 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npht8\" (UniqueName: \"kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8\") pod \"b7081818-31e8-4020-99eb-f658ed9adc9f\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") "
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.721637 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume\") pod \"b7081818-31e8-4020-99eb-f658ed9adc9f\" (UID: \"b7081818-31e8-4020-99eb-f658ed9adc9f\") "
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.722535 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume" (OuterVolumeSpecName: "config-volume") pod "b7081818-31e8-4020-99eb-f658ed9adc9f" (UID: "b7081818-31e8-4020-99eb-f658ed9adc9f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.727573 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8" (OuterVolumeSpecName: "kube-api-access-npht8") pod "b7081818-31e8-4020-99eb-f658ed9adc9f" (UID: "b7081818-31e8-4020-99eb-f658ed9adc9f"). InnerVolumeSpecName "kube-api-access-npht8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.741001 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b7081818-31e8-4020-99eb-f658ed9adc9f" (UID: "b7081818-31e8-4020-99eb-f658ed9adc9f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.826615 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npht8\" (UniqueName: \"kubernetes.io/projected/b7081818-31e8-4020-99eb-f658ed9adc9f-kube-api-access-npht8\") on node \"crc\" DevicePath \"\""
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.826661 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7081818-31e8-4020-99eb-f658ed9adc9f-config-volume\") on node \"crc\" DevicePath \"\""
Dec 09 17:45:03 crc kubenswrapper[4840]: I1209 17:45:03.826675 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7081818-31e8-4020-99eb-f658ed9adc9f-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.036192 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.036248 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.036292 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.036757 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.036818 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" gracePeriod=600
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.149049 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn" event={"ID":"b7081818-31e8-4020-99eb-f658ed9adc9f","Type":"ContainerDied","Data":"a1f17a403bc6823c273bcb7aecebcbf3f8daa35c13da75ff935cd9241618dc06"}
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.149095 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1f17a403bc6823c273bcb7aecebcbf3f8daa35c13da75ff935cd9241618dc06"
Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.149155 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn" Dec 09 17:45:04 crc kubenswrapper[4840]: E1209 17:45:04.165404 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.680274 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j"] Dec 09 17:45:04 crc kubenswrapper[4840]: I1209 17:45:04.691749 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421660-sm56j"] Dec 09 17:45:05 crc kubenswrapper[4840]: I1209 17:45:05.160999 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" exitCode=0 Dec 09 17:45:05 crc kubenswrapper[4840]: I1209 17:45:05.161075 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"} Dec 09 17:45:05 crc kubenswrapper[4840]: I1209 17:45:05.161536 4840 scope.go:117] "RemoveContainer" containerID="099e47ddd47d5e4281022c7138b08646f9cd89a2a26e6b5ed6287d0e1bceedcc" Dec 09 17:45:05 crc kubenswrapper[4840]: I1209 17:45:05.162606 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:45:05 crc kubenswrapper[4840]: E1209 17:45:05.163134 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:45:06 crc kubenswrapper[4840]: I1209 17:45:06.639189 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1c95712-598d-415d-b080-c5b7430d6186" path="/var/lib/kubelet/pods/e1c95712-598d-415d-b080-c5b7430d6186/volumes" Dec 09 17:45:07 crc kubenswrapper[4840]: E1209 17:45:07.611220 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:45:12 crc kubenswrapper[4840]: I1209 17:45:12.098567 4840 scope.go:117] "RemoveContainer" containerID="1376d1d11562b55216e698ca9e6169ce8d0d5779c1da116b8f6d17fed8c9e946" Dec 09 17:45:14 crc kubenswrapper[4840]: E1209 17:45:14.618608 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:45:15 crc kubenswrapper[4840]: I1209 17:45:15.608868 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:45:15 crc kubenswrapper[4840]: E1209 17:45:15.609217 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:45:20 crc kubenswrapper[4840]: E1209 17:45:20.610485 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:45:28 crc kubenswrapper[4840]: E1209 17:45:28.611623 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:45:30 crc kubenswrapper[4840]: I1209 17:45:30.608356 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:45:30 crc kubenswrapper[4840]: E1209 17:45:30.609491 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:45:31 crc kubenswrapper[4840]: E1209 17:45:31.610903 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:45:40 crc kubenswrapper[4840]: E1209 17:45:40.610944 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:45:45 crc kubenswrapper[4840]: I1209 17:45:45.608383 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:45:45 crc kubenswrapper[4840]: E1209 17:45:45.608986 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:45:46 crc kubenswrapper[4840]: E1209 17:45:46.610905 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:45:55 crc kubenswrapper[4840]: E1209 17:45:55.610574 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:45:58 crc kubenswrapper[4840]: E1209 17:45:58.611004 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:46:00 crc kubenswrapper[4840]: I1209 17:46:00.609123 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:46:00 crc kubenswrapper[4840]: E1209 17:46:00.610095 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:46:06 crc kubenswrapper[4840]: E1209 17:46:06.624309 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:46:13 crc kubenswrapper[4840]: E1209 17:46:13.611682 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:46:14 crc kubenswrapper[4840]: I1209 17:46:14.615079 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:46:14 crc kubenswrapper[4840]: E1209 17:46:14.615420 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:46:20 crc kubenswrapper[4840]: E1209 17:46:20.621592 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:46:26 crc kubenswrapper[4840]: E1209 17:46:26.610713 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:46:28 crc kubenswrapper[4840]: I1209 17:46:28.608898 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:46:28 crc kubenswrapper[4840]: E1209 17:46:28.609518 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:46:33 crc kubenswrapper[4840]: E1209 17:46:33.610777 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:46:38 crc kubenswrapper[4840]: E1209 17:46:38.610781 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:46:43 crc kubenswrapper[4840]: I1209 17:46:43.608364 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:46:43 crc kubenswrapper[4840]: E1209 17:46:43.609352 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:46:46 crc kubenswrapper[4840]: E1209 17:46:46.612333 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
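
The recurring "back-off 5m0s restarting failed container" entries above reflect the kubelet's exponential restart back-off: each failed restart doubles the wait until it saturates at a cap, and every sync attempt during the wait logs the CrashLoopBackOff error, which is why the same message repeats every few seconds. A sketch of that doubling schedule, assuming the commonly cited 10s initial delay together with the 5m cap visible in the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initialDelay = 10 * time.Second // assumed initial back-off
		maxDelay     = 5 * time.Minute  // the "back-off 5m0s" cap in the log
	)
	delay := initialDelay
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: wait %s before restarting\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // from here on every retry reports the full 5m0s
		}
	}
}
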
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:46:51 crc kubenswrapper[4840]: E1209 17:46:51.612644 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:46:55 crc kubenswrapper[4840]: I1209 17:46:55.249482 4840 generic.go:334] "Generic (PLEG): container finished" podID="18b120a1-f1fb-4739-8c18-2a4380eb70e0" containerID="3f713485b0100adfc3fb8d88ae034cbfbbdadaec8400d65de54e05d6277cea6e" exitCode=2 Dec 09 17:46:55 crc kubenswrapper[4840]: I1209 17:46:55.249616 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" event={"ID":"18b120a1-f1fb-4739-8c18-2a4380eb70e0","Type":"ContainerDied","Data":"3f713485b0100adfc3fb8d88ae034cbfbbdadaec8400d65de54e05d6277cea6e"} Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.609484 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:46:56 crc kubenswrapper[4840]: E1209 17:46:56.610193 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.805081 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.910804 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory\") pod \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.910868 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4b55d\" (UniqueName: \"kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d\") pod \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.911027 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key\") pod \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\" (UID: \"18b120a1-f1fb-4739-8c18-2a4380eb70e0\") " Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.919034 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d" (OuterVolumeSpecName: "kube-api-access-4b55d") pod "18b120a1-f1fb-4739-8c18-2a4380eb70e0" (UID: "18b120a1-f1fb-4739-8c18-2a4380eb70e0"). InnerVolumeSpecName "kube-api-access-4b55d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.941826 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory" (OuterVolumeSpecName: "inventory") pod "18b120a1-f1fb-4739-8c18-2a4380eb70e0" (UID: "18b120a1-f1fb-4739-8c18-2a4380eb70e0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:46:56 crc kubenswrapper[4840]: I1209 17:46:56.964427 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "18b120a1-f1fb-4739-8c18-2a4380eb70e0" (UID: "18b120a1-f1fb-4739-8c18-2a4380eb70e0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.013584 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.013628 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4b55d\" (UniqueName: \"kubernetes.io/projected/18b120a1-f1fb-4739-8c18-2a4380eb70e0-kube-api-access-4b55d\") on node \"crc\" DevicePath \"\"" Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.013691 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/18b120a1-f1fb-4739-8c18-2a4380eb70e0-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.281783 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" event={"ID":"18b120a1-f1fb-4739-8c18-2a4380eb70e0","Type":"ContainerDied","Data":"36ccfab1bf0baf3427ae331027cc3f2f8a6b32dcde98b9f4881df75e61f021a6"} Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.282121 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36ccfab1bf0baf3427ae331027cc3f2f8a6b32dcde98b9f4881df75e61f021a6" Dec 09 17:46:57 crc kubenswrapper[4840]: I1209 17:46:57.281882 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr" Dec 09 17:46:58 crc kubenswrapper[4840]: E1209 17:46:58.610488 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:47:05 crc kubenswrapper[4840]: E1209 17:47:05.611853 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:47:11 crc kubenswrapper[4840]: I1209 17:47:11.608575 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:47:11 crc kubenswrapper[4840]: E1209 17:47:11.609643 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:47:12 crc kubenswrapper[4840]: E1209 17:47:12.610489 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:47:16 crc kubenswrapper[4840]: E1209 17:47:16.611217 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:47:24 crc kubenswrapper[4840]: I1209 17:47:24.620222 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:47:24 crc kubenswrapper[4840]: E1209 17:47:24.621407 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:47:24 crc kubenswrapper[4840]: E1209 17:47:24.621638 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:47:30 crc kubenswrapper[4840]: E1209 
17:47:30.611311 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.029851 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh"] Dec 09 17:47:35 crc kubenswrapper[4840]: E1209 17:47:35.030946 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7081818-31e8-4020-99eb-f658ed9adc9f" containerName="collect-profiles" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.030986 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7081818-31e8-4020-99eb-f658ed9adc9f" containerName="collect-profiles" Dec 09 17:47:35 crc kubenswrapper[4840]: E1209 17:47:35.031027 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18b120a1-f1fb-4739-8c18-2a4380eb70e0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.031037 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="18b120a1-f1fb-4739-8c18-2a4380eb70e0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.031309 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7081818-31e8-4020-99eb-f658ed9adc9f" containerName="collect-profiles" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.031342 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="18b120a1-f1fb-4739-8c18-2a4380eb70e0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.032311 4840 util.go:30] "No sandbox for pod can be found. 
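
The cpu_manager.go and state_mem.go entries above run when a new pod is admitted: before reserving resources, the CPU and memory managers sweep out per-container state left behind by pods that no longer exist (here the finished collect-profiles and download-cache pods). An illustrative sketch of that sweep as a map pruned against the set of live pod UIDs; the state layout is an assumption, not kubelet's actual structure.

package main

import "fmt"

func main() {
	type key struct{ podUID, container string }
	// Assignments left behind by finished pods (UIDs taken from the log);
	// the "0-3" cpuset values are made up for illustration.
	assignments := map[key]string{
		{"b7081818-31e8-4020-99eb-f658ed9adc9f", "collect-profiles"}:                                    "0-3",
		{"18b120a1-f1fb-4739-8c18-2a4380eb70e0", "download-cache-edpm-deployment-openstack-edpm-ipam"}: "0-3",
	}
	// Only pods still known to the kubelet survive the sweep; here none do.
	active := map[string]bool{}
	for k := range assignments {
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
			delete(assignments, k)
			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", k.podUID, k.container)
		}
	}
}
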
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.037576 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.037667 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.038094 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.037901 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.039997 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh"] Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.122287 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m8cg\" (UniqueName: \"kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.122336 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.122486 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.224059 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.224200 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m8cg\" (UniqueName: \"kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.224224 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.229472 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.235524 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.243728 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m8cg\" (UniqueName: \"kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c66mh\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: I1209 17:47:35.362549 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" Dec 09 17:47:35 crc kubenswrapper[4840]: E1209 17:47:35.611761 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:47:36 crc kubenswrapper[4840]: I1209 17:47:36.003291 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh"] Dec 09 17:47:36 crc kubenswrapper[4840]: I1209 17:47:36.666777 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" event={"ID":"b3e6a41a-c85e-42c3-b473-c00456c83bf5","Type":"ContainerStarted","Data":"1654f35504372cc6bd61753ccdaccb3a5c4cc654239e753e8ed1f16a1411506b"} Dec 09 17:47:37 crc kubenswrapper[4840]: I1209 17:47:37.681874 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" event={"ID":"b3e6a41a-c85e-42c3-b473-c00456c83bf5","Type":"ContainerStarted","Data":"9b6d6fc169b780042566c75055893efa493b4c330dc778a66d66ed36407ff2ef"} Dec 09 17:47:37 crc kubenswrapper[4840]: I1209 17:47:37.731607 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" podStartSLOduration=2.30353013 podStartE2EDuration="2.731586108s" podCreationTimestamp="2025-12-09 17:47:35 +0000 UTC" firstStartedPulling="2025-12-09 17:47:36.011556425 +0000 UTC m=+3042.002667058" lastFinishedPulling="2025-12-09 17:47:36.439612403 +0000 UTC m=+3042.430723036" observedRunningTime="2025-12-09 17:47:37.698096283 +0000 UTC m=+3043.689206916" 
Dec 09 17:47:39 crc kubenswrapper[4840]: I1209 17:47:39.608809 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:47:39 crc kubenswrapper[4840]: E1209 17:47:39.609484 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:47:41 crc kubenswrapper[4840]: E1209 17:47:41.611598 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:47:50 crc kubenswrapper[4840]: I1209 17:47:50.612483 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 17:47:50 crc kubenswrapper[4840]: E1209 17:47:50.714034 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:47:50 crc kubenswrapper[4840]: E1209 17:47:50.714124 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 17:47:50 crc kubenswrapper[4840]: E1209 17:47:50.714327 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:47:50 crc kubenswrapper[4840]: E1209 17:47:50.715577 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
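
The pull failure above says the manifest for the current-tested tag is gone from quay.rdoproject.org. One way to confirm that independently of the kubelet is to ask the registry's OCI distribution endpoint for the tag's manifest; a 404 matches the "deleted or has expired" error. A hedged sketch follows (it assumes anonymous pull access; an auth-protected registry would answer 401 instead):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The repository and tag from the failing pull in the log.
	url := "https://quay.rdoproject.org/v2/podified-master-centos10/openstack-cloudkitty-api/manifests/current-tested"
	req, err := http.NewRequest(http.MethodHead, url, nil)
	if err != nil {
		panic(err)
	}
	// Ask for common manifest media types so the registry accepts the request.
	req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.index.v1+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		// Registries typically echo the manifest digest in this header.
		fmt.Println("tag exists; digest:", resp.Header.Get("Docker-Content-Digest"))
	case http.StatusNotFound:
		fmt.Println("tag is gone, matching the 'deleted or has expired' pull error")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
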
Dec 09 17:47:51 crc kubenswrapper[4840]: I1209 17:47:51.609778 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:47:51 crc kubenswrapper[4840]: E1209 17:47:51.610277 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:47:55 crc kubenswrapper[4840]: E1209 17:47:55.610470 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:48:05 crc kubenswrapper[4840]: I1209 17:48:05.609076 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:48:05 crc kubenswrapper[4840]: E1209 17:48:05.610404 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:48:05 crc kubenswrapper[4840]: E1209 17:48:05.610514 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:48:10 crc kubenswrapper[4840]: E1209 17:48:10.733767 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:48:10 crc kubenswrapper[4840]: E1209 17:48:10.735104 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:48:10 crc kubenswrapper[4840]: E1209 17:48:10.735275 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:48:10 crc kubenswrapper[4840]: E1209 17:48:10.736908 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:48:16 crc kubenswrapper[4840]: I1209 17:48:16.608351 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:48:16 crc kubenswrapper[4840]: E1209 17:48:16.608915 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:48:16 crc kubenswrapper[4840]: E1209 17:48:16.611666 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:48:22 crc kubenswrapper[4840]: E1209 17:48:22.610751 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:48:29 crc kubenswrapper[4840]: I1209 17:48:29.609245 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:48:29 crc kubenswrapper[4840]: E1209 17:48:29.610349 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:48:31 crc kubenswrapper[4840]: E1209 17:48:31.611243 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:48:33 crc kubenswrapper[4840]: E1209 17:48:33.611777 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:48:43 crc kubenswrapper[4840]: I1209 17:48:43.609504 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:48:43 crc kubenswrapper[4840]: E1209 17:48:43.610514 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:48:44 crc kubenswrapper[4840]: E1209 17:48:44.723012 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:48:44 crc kubenswrapper[4840]: E1209 17:48:44.722952 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:48:54 crc kubenswrapper[4840]: I1209 17:48:54.617100 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:48:54 crc kubenswrapper[4840]: E1209 17:48:54.617771 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:48:56 crc kubenswrapper[4840]: E1209 17:48:56.610498 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:48:59 crc kubenswrapper[4840]: E1209 17:48:59.612037 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:49:05 crc kubenswrapper[4840]: I1209 17:49:05.609129 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:49:05 crc kubenswrapper[4840]: E1209 17:49:05.609841 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 17:49:09 crc kubenswrapper[4840]: E1209 17:49:09.611007 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\""
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:49:10 crc kubenswrapper[4840]: E1209 17:49:10.610572 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:49:18 crc kubenswrapper[4840]: I1209 17:49:18.608555 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:49:18 crc kubenswrapper[4840]: E1209 17:49:18.610883 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:49:20 crc kubenswrapper[4840]: E1209 17:49:20.613164 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:49:23 crc kubenswrapper[4840]: E1209 17:49:23.610895 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:49:31 crc kubenswrapper[4840]: I1209 17:49:31.609410 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:49:31 crc kubenswrapper[4840]: E1209 17:49:31.610905 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:49:35 crc kubenswrapper[4840]: E1209 17:49:35.612035 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:49:36 crc kubenswrapper[4840]: E1209 17:49:36.609949 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:49:45 crc kubenswrapper[4840]: I1209 17:49:45.610125 4840 
scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:49:45 crc kubenswrapper[4840]: E1209 17:49:45.610876 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:49:48 crc kubenswrapper[4840]: E1209 17:49:48.611758 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:49:51 crc kubenswrapper[4840]: E1209 17:49:51.610688 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:49:57 crc kubenswrapper[4840]: I1209 17:49:57.609035 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:49:57 crc kubenswrapper[4840]: E1209 17:49:57.609776 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:49:59 crc kubenswrapper[4840]: E1209 17:49:59.611041 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:50:06 crc kubenswrapper[4840]: E1209 17:50:06.610801 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:50:08 crc kubenswrapper[4840]: I1209 17:50:08.608896 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7" Dec 09 17:50:09 crc kubenswrapper[4840]: I1209 17:50:09.255923 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232"} Dec 09 17:50:10 crc kubenswrapper[4840]: E1209 17:50:10.610934 4840 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:50:18 crc kubenswrapper[4840]: E1209 17:50:18.611095 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:50:22 crc kubenswrapper[4840]: E1209 17:50:22.611887 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:50:29 crc kubenswrapper[4840]: E1209 17:50:29.611482 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:50:33 crc kubenswrapper[4840]: E1209 17:50:33.613388 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:50:40 crc kubenswrapper[4840]: E1209 17:50:40.613666 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:50:44 crc kubenswrapper[4840]: E1209 17:50:44.616955 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:50:53 crc kubenswrapper[4840]: E1209 17:50:53.611430 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:50:57 crc kubenswrapper[4840]: E1209 17:50:57.611306 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" 
podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:51:06 crc kubenswrapper[4840]: E1209 17:51:06.610196 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:51:09 crc kubenswrapper[4840]: E1209 17:51:09.611477 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:51:12 crc kubenswrapper[4840]: I1209 17:51:12.266894 4840 scope.go:117] "RemoveContainer" containerID="635713850be847e9dd2c799bc07dcc2219439146ecd8d6e360f57569386a9abd" Dec 09 17:51:12 crc kubenswrapper[4840]: I1209 17:51:12.294729 4840 scope.go:117] "RemoveContainer" containerID="c576bfba35196e75499075764f950d871eadfda25aeeb8a974f25ca4764bd111" Dec 09 17:51:12 crc kubenswrapper[4840]: I1209 17:51:12.352866 4840 scope.go:117] "RemoveContainer" containerID="bbc752e06e41a1ef4c619c4e7ca6620dc87bee5aa25def42f5240095b090318f" Dec 09 17:51:21 crc kubenswrapper[4840]: E1209 17:51:21.611169 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:51:24 crc kubenswrapper[4840]: E1209 17:51:24.623400 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:51:32 crc kubenswrapper[4840]: E1209 17:51:32.611542 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:51:37 crc kubenswrapper[4840]: E1209 17:51:37.611082 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:51:46 crc kubenswrapper[4840]: E1209 17:51:46.610837 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:51:49 crc kubenswrapper[4840]: E1209 17:51:49.612264 4840 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.359838 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.363374 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.366819 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.367096 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.367158 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrnzw\" (UniqueName: \"kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.377377 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.469649 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.469753 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrnzw\" (UniqueName: \"kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.469899 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.470402 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities\") pod \"redhat-operators-pms84\" (UID: 
\"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.470644 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.490590 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrnzw\" (UniqueName: \"kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw\") pod \"redhat-operators-pms84\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:57 crc kubenswrapper[4840]: I1209 17:51:57.687035 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:51:58 crc kubenswrapper[4840]: I1209 17:51:58.222218 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:51:58 crc kubenswrapper[4840]: I1209 17:51:58.365866 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerStarted","Data":"146320e99a8313a5cdbeda02e849ab249887649bb1fa35d0ea43565862ffa004"} Dec 09 17:51:59 crc kubenswrapper[4840]: I1209 17:51:59.391470 4840 generic.go:334] "Generic (PLEG): container finished" podID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerID="9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af" exitCode=0 Dec 09 17:51:59 crc kubenswrapper[4840]: I1209 17:51:59.391583 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerDied","Data":"9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af"} Dec 09 17:51:59 crc kubenswrapper[4840]: E1209 17:51:59.610891 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:52:01 crc kubenswrapper[4840]: I1209 17:52:01.414205 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerStarted","Data":"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f"} Dec 09 17:52:01 crc kubenswrapper[4840]: E1209 17:52:01.611523 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:52:04 crc kubenswrapper[4840]: I1209 17:52:04.451603 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" 
event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerDied","Data":"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f"} Dec 09 17:52:04 crc kubenswrapper[4840]: I1209 17:52:04.451563 4840 generic.go:334] "Generic (PLEG): container finished" podID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerID="23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f" exitCode=0 Dec 09 17:52:05 crc kubenswrapper[4840]: I1209 17:52:05.465170 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerStarted","Data":"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a"} Dec 09 17:52:05 crc kubenswrapper[4840]: I1209 17:52:05.492638 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pms84" podStartSLOduration=2.776501245 podStartE2EDuration="8.49261668s" podCreationTimestamp="2025-12-09 17:51:57 +0000 UTC" firstStartedPulling="2025-12-09 17:51:59.395458768 +0000 UTC m=+3305.386569401" lastFinishedPulling="2025-12-09 17:52:05.111574203 +0000 UTC m=+3311.102684836" observedRunningTime="2025-12-09 17:52:05.481213015 +0000 UTC m=+3311.472323648" watchObservedRunningTime="2025-12-09 17:52:05.49261668 +0000 UTC m=+3311.483727323" Dec 09 17:52:07 crc kubenswrapper[4840]: I1209 17:52:07.687177 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:07 crc kubenswrapper[4840]: I1209 17:52:07.687462 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:08 crc kubenswrapper[4840]: I1209 17:52:08.740317 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pms84" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="registry-server" probeResult="failure" output=< Dec 09 17:52:08 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 17:52:08 crc kubenswrapper[4840]: > Dec 09 17:52:11 crc kubenswrapper[4840]: E1209 17:52:11.610270 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:52:15 crc kubenswrapper[4840]: E1209 17:52:15.610751 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:52:17 crc kubenswrapper[4840]: I1209 17:52:17.739175 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:17 crc kubenswrapper[4840]: I1209 17:52:17.788375 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:17 crc kubenswrapper[4840]: I1209 17:52:17.990461 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:52:19 crc kubenswrapper[4840]: 
I1209 17:52:19.588959 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pms84" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="registry-server" containerID="cri-o://e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a" gracePeriod=2 Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.111322 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.155305 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities\") pod \"d73b7b4b-90b7-40e6-b7d9-e80040297488\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.155443 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrnzw\" (UniqueName: \"kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw\") pod \"d73b7b4b-90b7-40e6-b7d9-e80040297488\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.155505 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content\") pod \"d73b7b4b-90b7-40e6-b7d9-e80040297488\" (UID: \"d73b7b4b-90b7-40e6-b7d9-e80040297488\") " Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.155994 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities" (OuterVolumeSpecName: "utilities") pod "d73b7b4b-90b7-40e6-b7d9-e80040297488" (UID: "d73b7b4b-90b7-40e6-b7d9-e80040297488"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.160598 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw" (OuterVolumeSpecName: "kube-api-access-lrnzw") pod "d73b7b4b-90b7-40e6-b7d9-e80040297488" (UID: "d73b7b4b-90b7-40e6-b7d9-e80040297488"). InnerVolumeSpecName "kube-api-access-lrnzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.258208 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.258259 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrnzw\" (UniqueName: \"kubernetes.io/projected/d73b7b4b-90b7-40e6-b7d9-e80040297488-kube-api-access-lrnzw\") on node \"crc\" DevicePath \"\"" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.262501 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d73b7b4b-90b7-40e6-b7d9-e80040297488" (UID: "d73b7b4b-90b7-40e6-b7d9-e80040297488"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.360175 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73b7b4b-90b7-40e6-b7d9-e80040297488-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.601449 4840 generic.go:334] "Generic (PLEG): container finished" podID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerID="e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a" exitCode=0 Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.601508 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pms84" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.601521 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerDied","Data":"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a"} Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.601570 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pms84" event={"ID":"d73b7b4b-90b7-40e6-b7d9-e80040297488","Type":"ContainerDied","Data":"146320e99a8313a5cdbeda02e849ab249887649bb1fa35d0ea43565862ffa004"} Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.601598 4840 scope.go:117] "RemoveContainer" containerID="e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.642220 4840 scope.go:117] "RemoveContainer" containerID="23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.643243 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.658527 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pms84"] Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.679436 4840 scope.go:117] "RemoveContainer" containerID="9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.734124 4840 scope.go:117] "RemoveContainer" containerID="e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a" Dec 09 17:52:20 crc kubenswrapper[4840]: E1209 17:52:20.734652 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a\": container with ID starting with e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a not found: ID does not exist" containerID="e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.734698 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a"} err="failed to get container status \"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a\": rpc error: code = NotFound desc = could not find container \"e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a\": container with ID starting with e6b0164855f7feae294c77432fa222dd234e4b8fbbb0f8d38c8f0230ab0fc30a not found: ID does not exist" Dec 09 17:52:20 crc 
kubenswrapper[4840]: I1209 17:52:20.734726 4840 scope.go:117] "RemoveContainer" containerID="23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f" Dec 09 17:52:20 crc kubenswrapper[4840]: E1209 17:52:20.735116 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f\": container with ID starting with 23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f not found: ID does not exist" containerID="23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.735163 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f"} err="failed to get container status \"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f\": rpc error: code = NotFound desc = could not find container \"23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f\": container with ID starting with 23968787a9702b6a9d3960e8f5537e3419bafe9aa9e370be43b238ae8db5bb0f not found: ID does not exist" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.735197 4840 scope.go:117] "RemoveContainer" containerID="9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af" Dec 09 17:52:20 crc kubenswrapper[4840]: E1209 17:52:20.736581 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af\": container with ID starting with 9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af not found: ID does not exist" containerID="9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af" Dec 09 17:52:20 crc kubenswrapper[4840]: I1209 17:52:20.736609 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af"} err="failed to get container status \"9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af\": rpc error: code = NotFound desc = could not find container \"9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af\": container with ID starting with 9e5d223b329f98b6b60a5858117a528b90e37c00b9ddff964aa8c9e7dbb8d9af not found: ID does not exist" Dec 09 17:52:22 crc kubenswrapper[4840]: E1209 17:52:22.610622 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:52:22 crc kubenswrapper[4840]: I1209 17:52:22.631927 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" path="/var/lib/kubelet/pods/d73b7b4b-90b7-40e6-b7d9-e80040297488/volumes" Dec 09 17:52:26 crc kubenswrapper[4840]: E1209 17:52:26.611533 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:52:34 crc 
kubenswrapper[4840]: I1209 17:52:34.035745 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:52:34 crc kubenswrapper[4840]: I1209 17:52:34.037240 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:52:34 crc kubenswrapper[4840]: E1209 17:52:34.617575 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:52:37 crc kubenswrapper[4840]: E1209 17:52:37.609996 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:52:47 crc kubenswrapper[4840]: E1209 17:52:47.612824 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:52:51 crc kubenswrapper[4840]: I1209 17:52:51.610478 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:52:51 crc kubenswrapper[4840]: E1209 17:52:51.698325 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:52:51 crc kubenswrapper[4840]: E1209 17:52:51.698388 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:52:51 crc kubenswrapper[4840]: E1209 17:52:51.698535 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:52:51 crc kubenswrapper[4840]: E1209 17:52:51.699787 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:53:00 crc kubenswrapper[4840]: E1209 17:53:00.611141 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:53:03 crc kubenswrapper[4840]: E1209 17:53:03.611084 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:53:04 crc kubenswrapper[4840]: I1209 17:53:04.036622 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:53:04 crc kubenswrapper[4840]: I1209 17:53:04.036927 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:53:14 crc kubenswrapper[4840]: E1209 17:53:14.617524 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:53:14 crc kubenswrapper[4840]: E1209 17:53:14.748065 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:53:14 crc kubenswrapper[4840]: E1209 17:53:14.748133 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 17:53:14 crc kubenswrapper[4840]: E1209 17:53:14.748333 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 17:53:14 crc kubenswrapper[4840]: E1209 17:53:14.749580 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:53:28 crc kubenswrapper[4840]: E1209 17:53:28.610576 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:53:28 crc kubenswrapper[4840]: E1209 17:53:28.612319 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.035756 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.037187 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.037295 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.038128 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.038287 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232" gracePeriod=600
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.284225 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232" exitCode=0
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.284277 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232"}
Dec 09 17:53:34 crc kubenswrapper[4840]: I1209 17:53:34.284316 4840 scope.go:117] "RemoveContainer" containerID="2a896f1b33a61755fe6bae5b550ca07153501816676f1d9b3dd16f0ad757a8f7"
Dec 09 17:53:35 crc kubenswrapper[4840]: I1209 17:53:35.315827 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc"}
Dec 09 17:53:39 crc kubenswrapper[4840]: E1209 17:53:39.611283 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:53:42 crc kubenswrapper[4840]: E1209 17:53:42.611561 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:53:53 crc kubenswrapper[4840]: E1209 17:53:53.610665 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:53:55 crc kubenswrapper[4840]: E1209 17:53:55.610645 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:53:56 crc kubenswrapper[4840]: I1209 17:53:56.542126 4840 generic.go:334] "Generic (PLEG): container finished" podID="b3e6a41a-c85e-42c3-b473-c00456c83bf5" containerID="9b6d6fc169b780042566c75055893efa493b4c330dc778a66d66ed36407ff2ef" exitCode=2
Dec 09 17:53:56 crc kubenswrapper[4840]: I1209 17:53:56.542180 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" event={"ID":"b3e6a41a-c85e-42c3-b473-c00456c83bf5","Type":"ContainerDied","Data":"9b6d6fc169b780042566c75055893efa493b4c330dc778a66d66ed36407ff2ef"}
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.106232 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh"
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.247468 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m8cg\" (UniqueName: \"kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg\") pod \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") "
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.247699 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key\") pod \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") "
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.247821 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory\") pod \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\" (UID: \"b3e6a41a-c85e-42c3-b473-c00456c83bf5\") "
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.253364 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg" (OuterVolumeSpecName: "kube-api-access-2m8cg") pod "b3e6a41a-c85e-42c3-b473-c00456c83bf5" (UID: "b3e6a41a-c85e-42c3-b473-c00456c83bf5"). InnerVolumeSpecName "kube-api-access-2m8cg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.274498 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b3e6a41a-c85e-42c3-b473-c00456c83bf5" (UID: "b3e6a41a-c85e-42c3-b473-c00456c83bf5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.287500 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory" (OuterVolumeSpecName: "inventory") pod "b3e6a41a-c85e-42c3-b473-c00456c83bf5" (UID: "b3e6a41a-c85e-42c3-b473-c00456c83bf5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.350094 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m8cg\" (UniqueName: \"kubernetes.io/projected/b3e6a41a-c85e-42c3-b473-c00456c83bf5-kube-api-access-2m8cg\") on node \"crc\" DevicePath \"\""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.350132 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.350145 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3e6a41a-c85e-42c3-b473-c00456c83bf5-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.564331 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh" event={"ID":"b3e6a41a-c85e-42c3-b473-c00456c83bf5","Type":"ContainerDied","Data":"1654f35504372cc6bd61753ccdaccb3a5c4cc654239e753e8ed1f16a1411506b"}
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.564385 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1654f35504372cc6bd61753ccdaccb3a5c4cc654239e753e8ed1f16a1411506b"
Dec 09 17:53:58 crc kubenswrapper[4840]: I1209 17:53:58.564472 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c66mh"
Dec 09 17:54:05 crc kubenswrapper[4840]: E1209 17:54:05.611320 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:54:06 crc kubenswrapper[4840]: E1209 17:54:06.609228 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:54:18 crc kubenswrapper[4840]: E1209 17:54:18.611131 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:54:21 crc kubenswrapper[4840]: E1209 17:54:21.611549 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:54:29 crc kubenswrapper[4840]: E1209 17:54:29.611114 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:54:33 crc kubenswrapper[4840]: E1209 17:54:33.612145 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:54:40 crc kubenswrapper[4840]: E1209 17:54:40.611196 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.147296 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:54:44 crc kubenswrapper[4840]: E1209 17:54:44.148324 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e6a41a-c85e-42c3-b473-c00456c83bf5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148343 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e6a41a-c85e-42c3-b473-c00456c83bf5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:54:44 crc kubenswrapper[4840]: E1209 17:54:44.148367 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="extract-content"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148373 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="extract-content"
Dec 09 17:54:44 crc kubenswrapper[4840]: E1209 17:54:44.148396 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="registry-server"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148401 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="registry-server"
Dec 09 17:54:44 crc kubenswrapper[4840]: E1209 17:54:44.148415 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="extract-utilities"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148421 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="extract-utilities"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148693 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3e6a41a-c85e-42c3-b473-c00456c83bf5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.148719 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d73b7b4b-90b7-40e6-b7d9-e80040297488" containerName="registry-server"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.150288 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.163733 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.302919 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.303110 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.303496 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slb4c\" (UniqueName: \"kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.364302 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bqzvr"]
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.366621 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.375920 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bqzvr"]
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.405409 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.405476 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.405587 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slb4c\" (UniqueName: \"kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.406515 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.406800 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.434443 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slb4c\" (UniqueName: \"kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c\") pod \"community-operators-bp9kx\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") " pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.497525 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.507015 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdn6w\" (UniqueName: \"kubernetes.io/projected/c1a67e7b-6b6f-4295-893f-43a6225efa13-kube-api-access-tdn6w\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.507239 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-utilities\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.507486 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-catalog-content\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.611320 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdn6w\" (UniqueName: \"kubernetes.io/projected/c1a67e7b-6b6f-4295-893f-43a6225efa13-kube-api-access-tdn6w\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.611683 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-utilities\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.611747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-catalog-content\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.612186 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-utilities\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.613277 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1a67e7b-6b6f-4295-893f-43a6225efa13-catalog-content\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.640373 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdn6w\" (UniqueName: \"kubernetes.io/projected/c1a67e7b-6b6f-4295-893f-43a6225efa13-kube-api-access-tdn6w\") pod \"certified-operators-bqzvr\" (UID: \"c1a67e7b-6b6f-4295-893f-43a6225efa13\") " pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:44 crc kubenswrapper[4840]: I1209 17:54:44.712104 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:54:45 crc kubenswrapper[4840]: I1209 17:54:45.105760 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:54:45 crc kubenswrapper[4840]: I1209 17:54:45.374849 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bqzvr"]
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.022681 4840 generic.go:334] "Generic (PLEG): container finished" podID="c1a67e7b-6b6f-4295-893f-43a6225efa13" containerID="1c902cbbd895c3506b8975d8cdf384b7d970559c128279b384e6cdc77ed49c14" exitCode=0
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.022743 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bqzvr" event={"ID":"c1a67e7b-6b6f-4295-893f-43a6225efa13","Type":"ContainerDied","Data":"1c902cbbd895c3506b8975d8cdf384b7d970559c128279b384e6cdc77ed49c14"}
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.022768 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bqzvr" event={"ID":"c1a67e7b-6b6f-4295-893f-43a6225efa13","Type":"ContainerStarted","Data":"b0da643b0206ff1d63cdc0b37dda6c7dc2da90e66601a6f5b7b3ecf42ee309c8"}
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.025710 4840 generic.go:334] "Generic (PLEG): container finished" podID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerID="7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31" exitCode=0
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.025751 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerDied","Data":"7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31"}
Dec 09 17:54:46 crc kubenswrapper[4840]: I1209 17:54:46.025779 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerStarted","Data":"51f38e394b179b60ad854262518d2201a3e28abd3fad4a73f4c20f779e944bd8"}
Dec 09 17:54:47 crc kubenswrapper[4840]: I1209 17:54:47.039542 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerStarted","Data":"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"}
Dec 09 17:54:47 crc kubenswrapper[4840]: E1209 17:54:47.610803 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.058167 4840 generic.go:334] "Generic (PLEG): container finished" podID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerID="efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8" exitCode=0
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.058211 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerDied","Data":"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"}
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.756045 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.759649 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.788755 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.909160 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.909349 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf4bs\" (UniqueName: \"kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:48 crc kubenswrapper[4840]: I1209 17:54:48.909395 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.011751 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf4bs\" (UniqueName: \"kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.011807 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.011977 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.012533 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.012581 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.043037 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf4bs\" (UniqueName: \"kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs\") pod \"redhat-marketplace-x9m7b\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") " pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:49 crc kubenswrapper[4840]: I1209 17:54:49.090764 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:53 crc kubenswrapper[4840]: I1209 17:54:53.074385 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:54:53 crc kubenswrapper[4840]: W1209 17:54:53.078068 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cc0ccf1_81a0_497a_9049_72af31bb07ce.slice/crio-081d7a4f6415779744966ef281e615bf718fab66746772e03a0e18ede1b5cf6e WatchSource:0}: Error finding container 081d7a4f6415779744966ef281e615bf718fab66746772e03a0e18ede1b5cf6e: Status 404 returned error can't find the container with id 081d7a4f6415779744966ef281e615bf718fab66746772e03a0e18ede1b5cf6e
Dec 09 17:54:53 crc kubenswrapper[4840]: I1209 17:54:53.106660 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerStarted","Data":"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"}
Dec 09 17:54:53 crc kubenswrapper[4840]: I1209 17:54:53.108335 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerStarted","Data":"081d7a4f6415779744966ef281e615bf718fab66746772e03a0e18ede1b5cf6e"}
Dec 09 17:54:53 crc kubenswrapper[4840]: I1209 17:54:53.110333 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bqzvr" event={"ID":"c1a67e7b-6b6f-4295-893f-43a6225efa13","Type":"ContainerStarted","Data":"ac315e9c5a5b5544013b32ce6e3ad9f19301ae16db16a31074e6fb77bbea3192"}
Dec 09 17:54:53 crc kubenswrapper[4840]: I1209 17:54:53.130838 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bp9kx" podStartSLOduration=2.546830071 podStartE2EDuration="9.130817603s" podCreationTimestamp="2025-12-09 17:54:44 +0000 UTC" firstStartedPulling="2025-12-09 17:54:46.029805346 +0000 UTC m=+3472.020915979" lastFinishedPulling="2025-12-09 17:54:52.613792828 +0000 UTC m=+3478.604903511" observedRunningTime="2025-12-09 17:54:53.124354568 +0000 UTC m=+3479.115465201" watchObservedRunningTime="2025-12-09 17:54:53.130817603 +0000 UTC m=+3479.121928236"
Dec 09 17:54:54 crc kubenswrapper[4840]: I1209 17:54:54.125925 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerStarted","Data":"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158"}
Dec 09 17:54:54 crc kubenswrapper[4840]: I1209 17:54:54.129410 4840 generic.go:334] "Generic (PLEG): container finished" podID="c1a67e7b-6b6f-4295-893f-43a6225efa13" containerID="ac315e9c5a5b5544013b32ce6e3ad9f19301ae16db16a31074e6fb77bbea3192" exitCode=0
Dec 09 17:54:54 crc kubenswrapper[4840]: I1209 17:54:54.129511 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bqzvr" event={"ID":"c1a67e7b-6b6f-4295-893f-43a6225efa13","Type":"ContainerDied","Data":"ac315e9c5a5b5544013b32ce6e3ad9f19301ae16db16a31074e6fb77bbea3192"}
Dec 09 17:54:54 crc kubenswrapper[4840]: I1209 17:54:54.497869 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:54 crc kubenswrapper[4840]: I1209 17:54:54.497927 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:54:55 crc kubenswrapper[4840]: I1209 17:54:55.141849 4840 generic.go:334] "Generic (PLEG): container finished" podID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerID="4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158" exitCode=0
Dec 09 17:54:55 crc kubenswrapper[4840]: I1209 17:54:55.141920 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerDied","Data":"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158"}
Dec 09 17:54:55 crc kubenswrapper[4840]: I1209 17:54:55.545160 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bp9kx" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="registry-server" probeResult="failure" output=<
Dec 09 17:54:55 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 17:54:55 crc kubenswrapper[4840]: >
Dec 09 17:54:55 crc kubenswrapper[4840]: E1209 17:54:55.609789 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:54:56 crc kubenswrapper[4840]: I1209 17:54:56.156177 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bqzvr" event={"ID":"c1a67e7b-6b6f-4295-893f-43a6225efa13","Type":"ContainerStarted","Data":"54cbf0b8fadf1bb062d8f888377a24136132efbd16d18fd9bdb9391d7d322cdd"}
Dec 09 17:54:56 crc kubenswrapper[4840]: I1209 17:54:56.159118 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerStarted","Data":"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"}
Dec 09 17:54:56 crc kubenswrapper[4840]: I1209 17:54:56.191432 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bqzvr" podStartSLOduration=2.48863359 podStartE2EDuration="12.191410751s" podCreationTimestamp="2025-12-09 17:54:44 +0000 UTC" firstStartedPulling="2025-12-09 17:54:46.024570797 +0000 UTC m=+3472.015681430" lastFinishedPulling="2025-12-09 17:54:55.727347948 +0000 UTC m=+3481.718458591" observedRunningTime="2025-12-09 17:54:56.180843269 +0000 UTC m=+3482.171953902" watchObservedRunningTime="2025-12-09 17:54:56.191410751 +0000 UTC m=+3482.182521384"
Dec 09 17:54:57 crc kubenswrapper[4840]: I1209 17:54:57.172901 4840 generic.go:334] "Generic (PLEG): container finished" podID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerID="11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408" exitCode=0
Dec 09 17:54:57 crc kubenswrapper[4840]: I1209 17:54:57.173009 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerDied","Data":"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"}
Dec 09 17:54:58 crc kubenswrapper[4840]: I1209 17:54:58.222196 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerStarted","Data":"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"}
Dec 09 17:54:58 crc kubenswrapper[4840]: I1209 17:54:58.251351 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x9m7b" podStartSLOduration=7.509349172 podStartE2EDuration="10.25133107s" podCreationTimestamp="2025-12-09 17:54:48 +0000 UTC" firstStartedPulling="2025-12-09 17:54:55.144015568 +0000 UTC m=+3481.135126201" lastFinishedPulling="2025-12-09 17:54:57.885997466 +0000 UTC m=+3483.877108099" observedRunningTime="2025-12-09 17:54:58.239720758 +0000 UTC m=+3484.230831391" watchObservedRunningTime="2025-12-09 17:54:58.25133107 +0000 UTC m=+3484.242441703"
Dec 09 17:54:59 crc kubenswrapper[4840]: I1209 17:54:59.091939 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:54:59 crc kubenswrapper[4840]: I1209 17:54:59.092010 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:55:00 crc kubenswrapper[4840]: I1209 17:55:00.147999 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-x9m7b" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="registry-server" probeResult="failure" output=<
Dec 09 17:55:00 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 17:55:00 crc kubenswrapper[4840]: >
Dec 09 17:55:02 crc kubenswrapper[4840]: E1209 17:55:02.611578 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.543589 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.591044 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.712798 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.712864 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.769900 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:55:04 crc kubenswrapper[4840]: I1209 17:55:04.790311 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:55:05 crc kubenswrapper[4840]: I1209 17:55:05.354769 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bqzvr"
Dec 09 17:55:06 crc kubenswrapper[4840]: I1209 17:55:06.324785 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bp9kx" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="registry-server" containerID="cri-o://f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671" gracePeriod=2
Dec 09 17:55:06 crc kubenswrapper[4840]: E1209 17:55:06.672786 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8a468c2_c0f3_4dc9_94bd_6ffc724a241d.slice/crio-conmon-f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 17:55:06 crc kubenswrapper[4840]: I1209 17:55:06.813567 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bqzvr"]
Dec 09 17:55:06 crc kubenswrapper[4840]: I1209 17:55:06.877847 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.014895 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content\") pod \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.015018 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities\") pod \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.015184 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slb4c\" (UniqueName: \"kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c\") pod \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\" (UID: \"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.015939 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities" (OuterVolumeSpecName: "utilities") pod "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" (UID: "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.025506 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c" (OuterVolumeSpecName: "kube-api-access-slb4c") pod "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" (UID: "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d"). InnerVolumeSpecName "kube-api-access-slb4c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.067393 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" (UID: "d8a468c2-c0f3-4dc9-94bd-6ffc724a241d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.118922 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.119024 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.119058 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slb4c\" (UniqueName: \"kubernetes.io/projected/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d-kube-api-access-slb4c\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.181639 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"]
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.181903 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nlmbr" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="registry-server" containerID="cri-o://a7c863e65f48387d426bba34f8371acabec4f51913e797cc98e4679cdbee24c0" gracePeriod=2
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.339905 4840 generic.go:334] "Generic (PLEG): container finished" podID="1285ea6e-8612-4b0f-adad-d93db6553569" containerID="a7c863e65f48387d426bba34f8371acabec4f51913e797cc98e4679cdbee24c0" exitCode=0
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.340025 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerDied","Data":"a7c863e65f48387d426bba34f8371acabec4f51913e797cc98e4679cdbee24c0"}
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.343555 4840 generic.go:334] "Generic (PLEG): container finished" podID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerID="f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671" exitCode=0
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.343690 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerDied","Data":"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"}
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.343778 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bp9kx" event={"ID":"d8a468c2-c0f3-4dc9-94bd-6ffc724a241d","Type":"ContainerDied","Data":"51f38e394b179b60ad854262518d2201a3e28abd3fad4a73f4c20f779e944bd8"}
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.343707 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bp9kx"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.343822 4840 scope.go:117] "RemoveContainer" containerID="f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.387571 4840 scope.go:117] "RemoveContainer" containerID="efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.390852 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.400206 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bp9kx"]
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.425839 4840 scope.go:117] "RemoveContainer" containerID="7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.456815 4840 scope.go:117] "RemoveContainer" containerID="f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"
Dec 09 17:55:07 crc kubenswrapper[4840]: E1209 17:55:07.457386 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671\": container with ID starting with f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671 not found: ID does not exist" containerID="f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.457434 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671"} err="failed to get container status \"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671\": rpc error: code = NotFound desc = could not find container \"f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671\": container with ID starting with f64cf2115447e0d0589edc42a58f705bb3e070f68ed583517d2bf0f78ac39671 not found: ID does not exist"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.457461 4840 scope.go:117] "RemoveContainer" containerID="efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"
Dec 09 17:55:07 crc kubenswrapper[4840]: E1209 17:55:07.458264 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8\": container with ID starting with efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8 not found: ID does not exist" containerID="efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.458303 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8"} err="failed to get container status \"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8\": rpc error: code = NotFound desc = could not find container \"efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8\": container with ID starting with efc079fadc35419bf20cc136748a2b341b305ae4daa90c9ae6317e2c1e512ef8 not found: ID does not exist"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.458328 4840 scope.go:117] "RemoveContainer" containerID="7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31"
Dec 09 17:55:07 crc kubenswrapper[4840]: E1209 17:55:07.458580 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31\": container with ID starting with 7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31 not found: ID does not exist" containerID="7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.458602 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31"} err="failed to get container status \"7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31\": rpc error: code = NotFound desc = could not find container \"7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31\": container with ID starting with 7c3e02c007ac58f76fd2af3f1adac3412cd215880ef92b4ac9ab9889132a0d31 not found: ID does not exist"
Dec 09 17:55:07 crc kubenswrapper[4840]: E1209 17:55:07.610206 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.735337 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nlmbr"
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.833336 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content\") pod \"1285ea6e-8612-4b0f-adad-d93db6553569\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.833457 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wqlt\" (UniqueName: \"kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt\") pod \"1285ea6e-8612-4b0f-adad-d93db6553569\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.833512 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities\") pod \"1285ea6e-8612-4b0f-adad-d93db6553569\" (UID: \"1285ea6e-8612-4b0f-adad-d93db6553569\") "
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.834039 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities" (OuterVolumeSpecName: "utilities") pod "1285ea6e-8612-4b0f-adad-d93db6553569" (UID: "1285ea6e-8612-4b0f-adad-d93db6553569"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.839356 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt" (OuterVolumeSpecName: "kube-api-access-7wqlt") pod "1285ea6e-8612-4b0f-adad-d93db6553569" (UID: "1285ea6e-8612-4b0f-adad-d93db6553569"). InnerVolumeSpecName "kube-api-access-7wqlt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.917957 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1285ea6e-8612-4b0f-adad-d93db6553569" (UID: "1285ea6e-8612-4b0f-adad-d93db6553569"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.936368 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.936406 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wqlt\" (UniqueName: \"kubernetes.io/projected/1285ea6e-8612-4b0f-adad-d93db6553569-kube-api-access-7wqlt\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:07 crc kubenswrapper[4840]: I1209 17:55:07.936420 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1285ea6e-8612-4b0f-adad-d93db6553569-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.355233 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nlmbr"
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.355247 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlmbr" event={"ID":"1285ea6e-8612-4b0f-adad-d93db6553569","Type":"ContainerDied","Data":"e109f5c3dbe2bbc28ff0655786e76067831596d06b5f7f23f87b5df856b88082"}
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.356077 4840 scope.go:117] "RemoveContainer" containerID="a7c863e65f48387d426bba34f8371acabec4f51913e797cc98e4679cdbee24c0"
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.377191 4840 scope.go:117] "RemoveContainer" containerID="9760c86ca8f9c83b7fd96fcce78a7c1dbb7c0fb70d0b51eb7ce84d8a8e4e3f86"
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.393690 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"]
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.403609 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nlmbr"]
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.410906 4840 scope.go:117] "RemoveContainer" containerID="63714f8c3ade66a80909e91568beb450e00cf714be0ea6115aa0f70d021b25a1"
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.619931 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" path="/var/lib/kubelet/pods/1285ea6e-8612-4b0f-adad-d93db6553569/volumes"
Dec 09 17:55:08 crc kubenswrapper[4840]: I1209 17:55:08.621726 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" path="/var/lib/kubelet/pods/d8a468c2-c0f3-4dc9-94bd-6ffc724a241d/volumes"
Dec 09 17:55:09 crc kubenswrapper[4840]: I1209 17:55:09.143881 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:55:09 crc kubenswrapper[4840]: I1209 17:55:09.201626 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:55:11 crc kubenswrapper[4840]: I1209 17:55:11.586834 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:55:11 crc kubenswrapper[4840]: I1209 17:55:11.587472 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x9m7b" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="registry-server" containerID="cri-o://759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f" gracePeriod=2
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.147698 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.223119 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities\") pod \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") "
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.223264 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf4bs\" (UniqueName: \"kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs\") pod \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") "
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.223477 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content\") pod \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\" (UID: \"3cc0ccf1-81a0-497a-9049-72af31bb07ce\") "
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.224541 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities" (OuterVolumeSpecName: "utilities") pod "3cc0ccf1-81a0-497a-9049-72af31bb07ce" (UID: "3cc0ccf1-81a0-497a-9049-72af31bb07ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.235255 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs" (OuterVolumeSpecName: "kube-api-access-zf4bs") pod "3cc0ccf1-81a0-497a-9049-72af31bb07ce" (UID: "3cc0ccf1-81a0-497a-9049-72af31bb07ce"). InnerVolumeSpecName "kube-api-access-zf4bs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.247668 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3cc0ccf1-81a0-497a-9049-72af31bb07ce" (UID: "3cc0ccf1-81a0-497a-9049-72af31bb07ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.333451 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.333499 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf4bs\" (UniqueName: \"kubernetes.io/projected/3cc0ccf1-81a0-497a-9049-72af31bb07ce-kube-api-access-zf4bs\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.333513 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc0ccf1-81a0-497a-9049-72af31bb07ce-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.403951 4840 generic.go:334] "Generic (PLEG): container finished" podID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerID="759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f" exitCode=0
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.404022 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerDied","Data":"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"}
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.404052 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9m7b" event={"ID":"3cc0ccf1-81a0-497a-9049-72af31bb07ce","Type":"ContainerDied","Data":"081d7a4f6415779744966ef281e615bf718fab66746772e03a0e18ede1b5cf6e"}
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.404076 4840 scope.go:117] "RemoveContainer" containerID="759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.404221 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9m7b"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.431570 4840 scope.go:117] "RemoveContainer" containerID="11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.437867 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.448386 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9m7b"]
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.462197 4840 scope.go:117] "RemoveContainer" containerID="4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.507463 4840 scope.go:117] "RemoveContainer" containerID="759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"
Dec 09 17:55:12 crc kubenswrapper[4840]: E1209 17:55:12.507760 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f\": container with ID starting with 759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f not found: ID does not exist" containerID="759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.507788 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f"} err="failed to get container status \"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f\": rpc error: code = NotFound desc = could not find container \"759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f\": container with ID starting with 759620fe5e9f9116728a4804e897e37929cac50c886ff0b96abe51cd4928418f not found: ID does not exist"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.507807 4840 scope.go:117] "RemoveContainer" containerID="11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"
Dec 09 17:55:12 crc kubenswrapper[4840]: E1209 17:55:12.507989 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408\": container with ID starting with 11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408 not found: ID does not exist" containerID="11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.508012 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408"} err="failed to get container status \"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408\": rpc error: code = NotFound desc = could not find container \"11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408\": container with ID starting with 11e479026adf9b4c6d2f62172aaf128744dc6a9bbba0704982b2e4f3b9687408 not found: ID does not exist"
Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.508027 4840 scope.go:117] "RemoveContainer" containerID="4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158"
Dec 09 17:55:12 crc kubenswrapper[4840]: E1209 17:55:12.508207 4840 log.go:32] "ContainerStatus from runtime service
failed" err="rpc error: code = NotFound desc = could not find container \"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158\": container with ID starting with 4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158 not found: ID does not exist" containerID="4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158" Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.508226 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158"} err="failed to get container status \"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158\": rpc error: code = NotFound desc = could not find container \"4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158\": container with ID starting with 4c47634848bda60b7e57b2f0feabc2aa5057c0263e1c1d1d1979ccf78320c158 not found: ID does not exist" Dec 09 17:55:12 crc kubenswrapper[4840]: I1209 17:55:12.620128 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" path="/var/lib/kubelet/pods/3cc0ccf1-81a0-497a-9049-72af31bb07ce/volumes" Dec 09 17:55:14 crc kubenswrapper[4840]: E1209 17:55:14.620238 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.041915 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw"] Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042375 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042398 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042407 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042413 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042432 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042439 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042448 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042453 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042465 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" 
containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042470 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042480 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042485 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="extract-content" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042501 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042506 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042519 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042525 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: E1209 17:55:15.042540 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042545 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="extract-utilities" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042728 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1285ea6e-8612-4b0f-adad-d93db6553569" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042743 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc0ccf1-81a0-497a-9049-72af31bb07ce" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.042755 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8a468c2-c0f3-4dc9-94bd-6ffc724a241d" containerName="registry-server" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.043688 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.049155 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.049339 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.049843 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.050160 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.054797 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw"] Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.128700 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8zqv\" (UniqueName: \"kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.128833 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.128921 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.231001 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.231162 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.231999 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8zqv\" (UniqueName: \"kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.236257 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.236287 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.254491 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8zqv\" (UniqueName: \"kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nkddw\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.362919 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 17:55:15 crc kubenswrapper[4840]: I1209 17:55:15.975726 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw"] Dec 09 17:55:16 crc kubenswrapper[4840]: I1209 17:55:16.443539 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" event={"ID":"feab3b6f-2a30-4db6-af22-22ecea863f88","Type":"ContainerStarted","Data":"ef3869de851e3a549e6ea36c1db94c10d79a42d750f04175743def4bd506aed2"} Dec 09 17:55:17 crc kubenswrapper[4840]: I1209 17:55:17.455655 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" event={"ID":"feab3b6f-2a30-4db6-af22-22ecea863f88","Type":"ContainerStarted","Data":"7fd6edb9fd3fd746151e181faca7140dfc2f61c710ffc0fafe269065b6bbffc0"} Dec 09 17:55:17 crc kubenswrapper[4840]: I1209 17:55:17.473397 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" podStartSLOduration=1.9142081800000001 podStartE2EDuration="2.473374679s" podCreationTimestamp="2025-12-09 17:55:15 +0000 UTC" firstStartedPulling="2025-12-09 17:55:15.980486884 +0000 UTC m=+3501.971597527" lastFinishedPulling="2025-12-09 17:55:16.539653393 +0000 UTC m=+3502.530764026" observedRunningTime="2025-12-09 17:55:17.471434504 +0000 UTC m=+3503.462545137" watchObservedRunningTime="2025-12-09 17:55:17.473374679 +0000 UTC m=+3503.464485312" Dec 09 17:55:20 crc kubenswrapper[4840]: E1209 17:55:20.609842 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:55:29 crc kubenswrapper[4840]: E1209 17:55:29.611215 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:55:34 crc kubenswrapper[4840]: I1209 17:55:34.036389 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:55:34 crc kubenswrapper[4840]: I1209 17:55:34.037053 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:55:34 crc kubenswrapper[4840]: E1209 17:55:34.624856 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:55:41 crc kubenswrapper[4840]: E1209 17:55:41.610688 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:55:47 crc kubenswrapper[4840]: E1209 17:55:47.609913 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:55:52 crc kubenswrapper[4840]: E1209 17:55:52.610698 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:55:58 crc kubenswrapper[4840]: E1209 17:55:58.611527 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:56:04 crc kubenswrapper[4840]: I1209 17:56:04.036149 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:56:04 crc kubenswrapper[4840]: I1209 17:56:04.036624 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:56:06 crc kubenswrapper[4840]: E1209 17:56:06.612145 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:56:10 crc kubenswrapper[4840]: E1209 17:56:10.610814 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:56:17 crc kubenswrapper[4840]: E1209 17:56:17.611089 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:56:22 crc kubenswrapper[4840]: E1209 17:56:22.611014 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:56:31 crc kubenswrapper[4840]: E1209 17:56:31.611945 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:56:33 crc kubenswrapper[4840]: E1209 17:56:33.611038 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.036007 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.036072 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.036116 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.036823 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.036882 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" gracePeriod=600 Dec 09 17:56:34 crc kubenswrapper[4840]: E1209 17:56:34.155502 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.179098 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" exitCode=0 Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.179172 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc"} Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.179241 4840 scope.go:117] "RemoveContainer" containerID="5f2325a33fc55da866851010414ceafb8876b8e6fda7106c24025a42fa4f0232" Dec 09 17:56:34 crc kubenswrapper[4840]: I1209 17:56:34.180198 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:56:34 crc kubenswrapper[4840]: E1209 17:56:34.180500 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:56:44 crc kubenswrapper[4840]: I1209 17:56:44.614795 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:56:44 crc kubenswrapper[4840]: E1209 17:56:44.615565 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:56:45 crc kubenswrapper[4840]: E1209 17:56:45.611063 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:56:45 crc kubenswrapper[4840]: E1209 17:56:45.611070 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:56:55 crc kubenswrapper[4840]: I1209 17:56:55.608731 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:56:55 crc kubenswrapper[4840]: E1209 17:56:55.609442 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:56:56 crc kubenswrapper[4840]: E1209 17:56:56.611087 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:56:59 crc kubenswrapper[4840]: E1209 17:56:59.609996 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:57:08 crc kubenswrapper[4840]: I1209 17:57:08.608197 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:57:08 crc kubenswrapper[4840]: E1209 17:57:08.608923 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:57:10 crc kubenswrapper[4840]: E1209 17:57:10.611005 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:57:11 crc kubenswrapper[4840]: E1209 17:57:11.610603 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:57:21 crc kubenswrapper[4840]: E1209 17:57:21.611095 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:57:22 crc kubenswrapper[4840]: E1209 17:57:22.611739 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:57:23 crc kubenswrapper[4840]: I1209 17:57:23.609380 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:57:23 crc kubenswrapper[4840]: E1209 17:57:23.609747 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:57:34 crc kubenswrapper[4840]: E1209 17:57:34.616890 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:57:35 crc kubenswrapper[4840]: E1209 17:57:35.610725 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:57:36 crc kubenswrapper[4840]: I1209 17:57:36.608952 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:57:36 crc kubenswrapper[4840]: E1209 17:57:36.609238 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" 
podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:57:48 crc kubenswrapper[4840]: E1209 17:57:48.611172 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:57:48 crc kubenswrapper[4840]: E1209 17:57:48.611704 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:57:50 crc kubenswrapper[4840]: I1209 17:57:50.609203 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:57:50 crc kubenswrapper[4840]: E1209 17:57:50.609680 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:57:59 crc kubenswrapper[4840]: I1209 17:57:59.610642 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 17:57:59 crc kubenswrapper[4840]: E1209 17:57:59.722925 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:57:59 crc kubenswrapper[4840]: E1209 17:57:59.723005 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 17:57:59 crc kubenswrapper[4840]: E1209 17:57:59.723135 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 17:57:59 crc kubenswrapper[4840]: E1209 17:57:59.724331 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:58:02 crc kubenswrapper[4840]: I1209 17:58:02.609663 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:58:02 crc kubenswrapper[4840]: E1209 17:58:02.610691 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:58:03 crc kubenswrapper[4840]: E1209 17:58:03.611655 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:58:10 crc kubenswrapper[4840]: E1209 17:58:10.609991 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:58:14 crc kubenswrapper[4840]: E1209 17:58:14.623432 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:58:17 crc kubenswrapper[4840]: I1209 17:58:17.609415 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:58:17 crc kubenswrapper[4840]: E1209 17:58:17.609914 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:58:23 crc kubenswrapper[4840]: E1209 17:58:23.610290 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:58:28 crc kubenswrapper[4840]: E1209 17:58:28.730468 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:58:28 crc kubenswrapper[4840]: E1209 17:58:28.731031 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 17:58:28 crc kubenswrapper[4840]: E1209 17:58:28.731237 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 17:58:28 crc kubenswrapper[4840]: E1209 17:58:28.732733 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:58:29 crc kubenswrapper[4840]: I1209 17:58:29.608841 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:58:29 crc kubenswrapper[4840]: E1209 17:58:29.609477 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:58:36 crc kubenswrapper[4840]: E1209 17:58:36.611856 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:58:41 crc kubenswrapper[4840]: E1209 17:58:41.611239 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:58:42 crc kubenswrapper[4840]: I1209 17:58:42.608676 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:58:42 crc kubenswrapper[4840]: E1209 17:58:42.609238 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:58:48 crc kubenswrapper[4840]: E1209 17:58:48.610243 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:58:54 crc kubenswrapper[4840]: I1209 17:58:54.619789 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:58:54 crc kubenswrapper[4840]: E1209 17:58:54.620515 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:58:55 crc kubenswrapper[4840]: E1209 17:58:55.611305 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:01 crc kubenswrapper[4840]: E1209 17:59:01.610773 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:59:06 crc kubenswrapper[4840]: E1209 17:59:06.610361 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:07 crc kubenswrapper[4840]: I1209 17:59:07.610050 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:59:07 crc kubenswrapper[4840]: E1209 17:59:07.610434 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:59:15 crc kubenswrapper[4840]: E1209 17:59:15.610753 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:59:17 crc kubenswrapper[4840]: E1209 17:59:17.610988 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:20 crc kubenswrapper[4840]: I1209 17:59:20.608673 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:59:20 crc kubenswrapper[4840]: E1209 17:59:20.609184 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:59:29 crc kubenswrapper[4840]: E1209 17:59:29.610264 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:59:29 crc kubenswrapper[4840]: E1209 17:59:29.610438 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:32 crc kubenswrapper[4840]: I1209 17:59:32.609560 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:59:32 crc kubenswrapper[4840]: E1209 17:59:32.610722 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:59:40 crc kubenswrapper[4840]: E1209 17:59:40.611586 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:40 crc kubenswrapper[4840]: E1209 17:59:40.611894 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:59:43 crc kubenswrapper[4840]: I1209 17:59:43.608907 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:59:43 crc kubenswrapper[4840]: E1209 17:59:43.609487 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 17:59:51 crc kubenswrapper[4840]: E1209 17:59:51.610148 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 17:59:53 crc kubenswrapper[4840]: E1209 17:59:53.610784 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 17:59:58 crc kubenswrapper[4840]: I1209 17:59:58.609210 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 17:59:58 crc kubenswrapper[4840]: E1209 17:59:58.609664 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.181822 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z"] Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.183575 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.185945 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.188190 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.195110 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z"] Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.316544 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.316640 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.316852 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zd5vr\" (UniqueName: \"kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 
18:00:00.419530 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zd5vr\" (UniqueName: \"kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.419605 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.419656 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.420462 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.426292 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.439990 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zd5vr\" (UniqueName: \"kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr\") pod \"collect-profiles-29421720-5sw9z\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.513904 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:00 crc kubenswrapper[4840]: I1209 18:00:00.957137 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z"] Dec 09 18:00:01 crc kubenswrapper[4840]: I1209 18:00:01.301548 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" event={"ID":"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724","Type":"ContainerStarted","Data":"4dc833c20bd3ed1ec6d0c478bbfee02f96156cd7b34c496e1164f534b02bc1d4"} Dec 09 18:00:01 crc kubenswrapper[4840]: I1209 18:00:01.301843 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" event={"ID":"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724","Type":"ContainerStarted","Data":"0dc36d8e8f592516fbb40cf5f4aaf7b12ac10a800ae6f72b7d08003933731219"} Dec 09 18:00:01 crc kubenswrapper[4840]: I1209 18:00:01.315933 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" podStartSLOduration=1.315915708 podStartE2EDuration="1.315915708s" podCreationTimestamp="2025-12-09 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 18:00:01.314159148 +0000 UTC m=+3787.305269781" watchObservedRunningTime="2025-12-09 18:00:01.315915708 +0000 UTC m=+3787.307026341" Dec 09 18:00:02 crc kubenswrapper[4840]: I1209 18:00:02.325079 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" event={"ID":"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724","Type":"ContainerDied","Data":"4dc833c20bd3ed1ec6d0c478bbfee02f96156cd7b34c496e1164f534b02bc1d4"} Dec 09 18:00:02 crc kubenswrapper[4840]: I1209 18:00:02.324902 4840 generic.go:334] "Generic (PLEG): container finished" podID="3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" containerID="4dc833c20bd3ed1ec6d0c478bbfee02f96156cd7b34c496e1164f534b02bc1d4" exitCode=0 Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.760628 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.896328 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zd5vr\" (UniqueName: \"kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr\") pod \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.896476 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume\") pod \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.896580 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume\") pod \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\" (UID: \"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724\") " Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.897327 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume" (OuterVolumeSpecName: "config-volume") pod "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" (UID: "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.902319 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" (UID: "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.902724 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr" (OuterVolumeSpecName: "kube-api-access-zd5vr") pod "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" (UID: "3a0ab1cf-7ef3-4ada-85a1-0dba4948f724"). InnerVolumeSpecName "kube-api-access-zd5vr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.999515 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zd5vr\" (UniqueName: \"kubernetes.io/projected/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-kube-api-access-zd5vr\") on node \"crc\" DevicePath \"\"" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.999562 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 18:00:03 crc kubenswrapper[4840]: I1209 18:00:03.999575 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0ab1cf-7ef3-4ada-85a1-0dba4948f724-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.352823 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" event={"ID":"3a0ab1cf-7ef3-4ada-85a1-0dba4948f724","Type":"ContainerDied","Data":"0dc36d8e8f592516fbb40cf5f4aaf7b12ac10a800ae6f72b7d08003933731219"} Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.352867 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dc36d8e8f592516fbb40cf5f4aaf7b12ac10a800ae6f72b7d08003933731219" Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.352927 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421720-5sw9z" Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.402855 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l"] Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.411456 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421675-4lw2l"] Dec 09 18:00:04 crc kubenswrapper[4840]: E1209 18:00:04.617397 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:00:04 crc kubenswrapper[4840]: I1209 18:00:04.631351 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="576e7f6a-1593-456a-8530-2fe9909fa1b3" path="/var/lib/kubelet/pods/576e7f6a-1593-456a-8530-2fe9909fa1b3/volumes" Dec 09 18:00:08 crc kubenswrapper[4840]: E1209 18:00:08.611334 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:00:12 crc kubenswrapper[4840]: I1209 18:00:12.608810 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:00:12 crc kubenswrapper[4840]: E1209 18:00:12.610685 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:00:12 crc kubenswrapper[4840]: I1209 18:00:12.623066 4840 scope.go:117] "RemoveContainer" containerID="314b1bb38798c7ca9d98f134281cc7a931655fab16e93ca327c42011739fc918" Dec 09 18:00:19 crc kubenswrapper[4840]: E1209 18:00:19.611099 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:00:19 crc kubenswrapper[4840]: E1209 18:00:19.611224 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:00:27 crc kubenswrapper[4840]: I1209 18:00:27.609885 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:00:27 crc kubenswrapper[4840]: E1209 18:00:27.610595 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:00:32 crc kubenswrapper[4840]: E1209 18:00:32.611529 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:00:32 crc kubenswrapper[4840]: E1209 18:00:32.611653 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:00:38 crc kubenswrapper[4840]: I1209 18:00:38.608629 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:00:38 crc kubenswrapper[4840]: E1209 18:00:38.609269 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:00:43 crc kubenswrapper[4840]: E1209 18:00:43.611668 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:00:45 crc kubenswrapper[4840]: E1209 18:00:45.611455 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:00:51 crc kubenswrapper[4840]: I1209 18:00:51.608890 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:00:51 crc kubenswrapper[4840]: E1209 18:00:51.609805 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:00:54 crc kubenswrapper[4840]: E1209 18:00:54.617979 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.159642 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29421721-t7pw8"] Dec 09 18:01:00 crc kubenswrapper[4840]: E1209 18:01:00.160680 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" containerName="collect-profiles" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.160698 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" containerName="collect-profiles" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.160998 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a0ab1cf-7ef3-4ada-85a1-0dba4948f724" containerName="collect-profiles" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.161870 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.215646 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29421721-t7pw8"] Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.249782 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.249907 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-492bh\" (UniqueName: \"kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.250106 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.250129 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.351683 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.351732 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.351798 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.351882 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-492bh\" (UniqueName: \"kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.371387 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.371552 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.372688 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.373821 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-492bh\" (UniqueName: \"kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh\") pod \"keystone-cron-29421721-t7pw8\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.485983 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:00 crc kubenswrapper[4840]: E1209 18:01:00.616801 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:01:00 crc kubenswrapper[4840]: I1209 18:01:00.933666 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29421721-t7pw8"] Dec 09 18:01:01 crc kubenswrapper[4840]: I1209 18:01:01.944125 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421721-t7pw8" event={"ID":"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6","Type":"ContainerStarted","Data":"a9e6bd8f624cfac5063b1a4bd873730ebc45d43ba0a88206a5c9ae153830102e"} Dec 09 18:01:01 crc kubenswrapper[4840]: I1209 18:01:01.944464 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421721-t7pw8" event={"ID":"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6","Type":"ContainerStarted","Data":"a9cd82982d24fe458d843a8304afdd7a78298253ec780f6f665780c2a23146f5"} Dec 09 18:01:01 crc kubenswrapper[4840]: I1209 18:01:01.967026 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29421721-t7pw8" podStartSLOduration=1.966947703 podStartE2EDuration="1.966947703s" podCreationTimestamp="2025-12-09 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 18:01:01.965704087 +0000 UTC m=+3847.956814720" watchObservedRunningTime="2025-12-09 18:01:01.966947703 +0000 UTC m=+3847.958058376" Dec 09 18:01:02 crc kubenswrapper[4840]: I1209 18:01:02.609321 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:01:02 crc 
kubenswrapper[4840]: E1209 18:01:02.609686 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:01:03 crc kubenswrapper[4840]: I1209 18:01:03.969512 4840 generic.go:334] "Generic (PLEG): container finished" podID="fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" containerID="a9e6bd8f624cfac5063b1a4bd873730ebc45d43ba0a88206a5c9ae153830102e" exitCode=0 Dec 09 18:01:03 crc kubenswrapper[4840]: I1209 18:01:03.969602 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421721-t7pw8" event={"ID":"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6","Type":"ContainerDied","Data":"a9e6bd8f624cfac5063b1a4bd873730ebc45d43ba0a88206a5c9ae153830102e"} Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.434480 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.581066 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle\") pod \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.581147 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys\") pod \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.581312 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data\") pod \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.581412 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-492bh\" (UniqueName: \"kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh\") pod \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\" (UID: \"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6\") " Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.586007 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" (UID: "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.586544 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh" (OuterVolumeSpecName: "kube-api-access-492bh") pod "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" (UID: "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6"). InnerVolumeSpecName "kube-api-access-492bh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.616306 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" (UID: "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.652607 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data" (OuterVolumeSpecName: "config-data") pod "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" (UID: "fd6ba15a-4332-4bf1-87f7-c0ab97b100e6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.683737 4840 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.684126 4840 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.684160 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-492bh\" (UniqueName: \"kubernetes.io/projected/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-kube-api-access-492bh\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.684204 4840 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6ba15a-4332-4bf1-87f7-c0ab97b100e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.992527 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421721-t7pw8" event={"ID":"fd6ba15a-4332-4bf1-87f7-c0ab97b100e6","Type":"ContainerDied","Data":"a9cd82982d24fe458d843a8304afdd7a78298253ec780f6f665780c2a23146f5"} Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.992573 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9cd82982d24fe458d843a8304afdd7a78298253ec780f6f665780c2a23146f5" Dec 09 18:01:05 crc kubenswrapper[4840]: I1209 18:01:05.992585 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29421721-t7pw8" Dec 09 18:01:07 crc kubenswrapper[4840]: E1209 18:01:07.611570 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:14 crc kubenswrapper[4840]: E1209 18:01:14.621069 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:01:16 crc kubenswrapper[4840]: I1209 18:01:16.608578 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:01:16 crc kubenswrapper[4840]: E1209 18:01:16.609110 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:01:18 crc kubenswrapper[4840]: E1209 18:01:18.611999 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:27 crc kubenswrapper[4840]: I1209 18:01:27.609322 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:01:27 crc kubenswrapper[4840]: E1209 18:01:27.610152 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:01:29 crc kubenswrapper[4840]: E1209 18:01:29.610643 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:01:31 crc kubenswrapper[4840]: E1209 18:01:31.610574 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:39 crc kubenswrapper[4840]: I1209 18:01:39.372628 4840 generic.go:334] 
"Generic (PLEG): container finished" podID="feab3b6f-2a30-4db6-af22-22ecea863f88" containerID="7fd6edb9fd3fd746151e181faca7140dfc2f61c710ffc0fafe269065b6bbffc0" exitCode=2 Dec 09 18:01:39 crc kubenswrapper[4840]: I1209 18:01:39.372713 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" event={"ID":"feab3b6f-2a30-4db6-af22-22ecea863f88","Type":"ContainerDied","Data":"7fd6edb9fd3fd746151e181faca7140dfc2f61c710ffc0fafe269065b6bbffc0"} Dec 09 18:01:39 crc kubenswrapper[4840]: I1209 18:01:39.608190 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:01:40 crc kubenswrapper[4840]: I1209 18:01:40.943292 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.087585 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key\") pod \"feab3b6f-2a30-4db6-af22-22ecea863f88\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.088086 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory\") pod \"feab3b6f-2a30-4db6-af22-22ecea863f88\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.088368 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8zqv\" (UniqueName: \"kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv\") pod \"feab3b6f-2a30-4db6-af22-22ecea863f88\" (UID: \"feab3b6f-2a30-4db6-af22-22ecea863f88\") " Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.098689 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv" (OuterVolumeSpecName: "kube-api-access-h8zqv") pod "feab3b6f-2a30-4db6-af22-22ecea863f88" (UID: "feab3b6f-2a30-4db6-af22-22ecea863f88"). InnerVolumeSpecName "kube-api-access-h8zqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.128403 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory" (OuterVolumeSpecName: "inventory") pod "feab3b6f-2a30-4db6-af22-22ecea863f88" (UID: "feab3b6f-2a30-4db6-af22-22ecea863f88"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.131949 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "feab3b6f-2a30-4db6-af22-22ecea863f88" (UID: "feab3b6f-2a30-4db6-af22-22ecea863f88"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.191465 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.191703 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/feab3b6f-2a30-4db6-af22-22ecea863f88-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.191811 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8zqv\" (UniqueName: \"kubernetes.io/projected/feab3b6f-2a30-4db6-af22-22ecea863f88-kube-api-access-h8zqv\") on node \"crc\" DevicePath \"\"" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.415997 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" event={"ID":"feab3b6f-2a30-4db6-af22-22ecea863f88","Type":"ContainerDied","Data":"ef3869de851e3a549e6ea36c1db94c10d79a42d750f04175743def4bd506aed2"} Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.416379 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef3869de851e3a549e6ea36c1db94c10d79a42d750f04175743def4bd506aed2" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.416007 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nkddw" Dec 09 18:01:41 crc kubenswrapper[4840]: I1209 18:01:41.418718 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e"} Dec 09 18:01:42 crc kubenswrapper[4840]: E1209 18:01:42.613271 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:01:42 crc kubenswrapper[4840]: E1209 18:01:42.613301 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:55 crc kubenswrapper[4840]: E1209 18:01:55.611104 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:01:55 crc kubenswrapper[4840]: E1209 18:01:55.611395 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" 
podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:07 crc kubenswrapper[4840]: E1209 18:02:07.611544 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:07 crc kubenswrapper[4840]: E1209 18:02:07.612371 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:02:20 crc kubenswrapper[4840]: E1209 18:02:20.610730 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:21 crc kubenswrapper[4840]: E1209 18:02:21.612431 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:02:31 crc kubenswrapper[4840]: E1209 18:02:31.610812 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:34 crc kubenswrapper[4840]: E1209 18:02:34.628008 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:02:42 crc kubenswrapper[4840]: E1209 18:02:42.612392 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:45 crc kubenswrapper[4840]: E1209 18:02:45.611676 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:02:53 crc kubenswrapper[4840]: E1209 18:02:53.610927 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.105924 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"] Dec 09 18:02:58 crc kubenswrapper[4840]: E1209 18:02:58.106793 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="feab3b6f-2a30-4db6-af22-22ecea863f88" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.106806 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="feab3b6f-2a30-4db6-af22-22ecea863f88" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 18:02:58 crc kubenswrapper[4840]: E1209 18:02:58.106826 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" containerName="keystone-cron" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.106832 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" containerName="keystone-cron" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.107050 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd6ba15a-4332-4bf1-87f7-c0ab97b100e6" containerName="keystone-cron" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.107064 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="feab3b6f-2a30-4db6-af22-22ecea863f88" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.108530 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.142071 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"] Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.198633 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.198687 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4sdp\" (UniqueName: \"kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.198735 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.301747 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content\") pod \"redhat-operators-f6k87\" (UID: 
\"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.301798 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4sdp\" (UniqueName: \"kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.301855 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.302366 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.302728 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.325651 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4sdp\" (UniqueName: \"kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp\") pod \"redhat-operators-f6k87\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") " pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.430573 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f6k87" Dec 09 18:02:58 crc kubenswrapper[4840]: I1209 18:02:58.944705 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"] Dec 09 18:02:59 crc kubenswrapper[4840]: I1209 18:02:59.428769 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerID="7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423" exitCode=0 Dec 09 18:02:59 crc kubenswrapper[4840]: I1209 18:02:59.428822 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerDied","Data":"7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423"} Dec 09 18:02:59 crc kubenswrapper[4840]: I1209 18:02:59.428850 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerStarted","Data":"fd568305daa43c2695640a6db286d1470faf93dc0fd7df1b23c8ecc2177b8a2f"} Dec 09 18:03:00 crc kubenswrapper[4840]: I1209 18:03:00.456375 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerStarted","Data":"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"} Dec 09 18:03:00 crc kubenswrapper[4840]: E1209 18:03:00.610481 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:03:03 crc kubenswrapper[4840]: I1209 18:03:03.497863 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerID="b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7" exitCode=0 Dec 09 18:03:03 crc kubenswrapper[4840]: I1209 18:03:03.497928 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerDied","Data":"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"} Dec 09 18:03:03 crc kubenswrapper[4840]: I1209 18:03:03.500653 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:03:04 crc kubenswrapper[4840]: I1209 18:03:04.509473 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerStarted","Data":"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"} Dec 09 18:03:04 crc kubenswrapper[4840]: I1209 18:03:04.537695 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-f6k87" podStartSLOduration=1.980847733 podStartE2EDuration="6.537667852s" podCreationTimestamp="2025-12-09 18:02:58 +0000 UTC" firstStartedPulling="2025-12-09 18:02:59.434697316 +0000 UTC m=+3965.425807949" lastFinishedPulling="2025-12-09 18:03:03.991517415 +0000 UTC m=+3969.982628068" observedRunningTime="2025-12-09 18:03:04.525987707 +0000 UTC m=+3970.517098370" watchObservedRunningTime="2025-12-09 18:03:04.537667852 +0000 UTC 
m=+3970.528778505" Dec 09 18:03:05 crc kubenswrapper[4840]: E1209 18:03:05.727140 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:03:05 crc kubenswrapper[4840]: E1209 18:03:05.727535 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:03:05 crc kubenswrapper[4840]: E1209 18:03:05.727700 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizeP
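Every pull failure above resolves to the same root cause: the current-tested tag of the quay.rdoproject.org/podified-master-centos10 images was deleted or expired on the registry (hence the "revive via time machine" hint), so CRI-O cannot resolve a manifest and the kubelet cycles between ErrImagePull and ImagePullBackOff. To see at a glance which images are affected and how often, a tally over the log works; the sketch below is a minimal pass that assumes only the record shapes visible above, and "kubelet.log" is a placeholder path, not something this log specifies.

#!/usr/bin/env python3
"""Tally image-pull failures in a kubelet log.

A minimal sketch, assuming record shapes like the entries above;
"kubelet.log" is a placeholder path.
"""
import re
import sys
from collections import Counter

MARKERS = ("ErrImagePull", "ImagePullBackOff", "Failed to pull image")
IMAGE_FIELD = re.compile(r'image="([^"]+)"')       # plain image="..." field
BACKOFF_IMAGE = re.compile(r'pulling image "([^"]+)"')  # back-off wording

def tally(path):
    counts = Counter()
    with open(path, errors="replace") as fh:
        for line in fh:
            if not any(marker in line for marker in MARKERS):
                continue
            # Back-off records escape their quotes (\" and \\\");
            # collapse the backslashes so one pass covers both shapes.
            flat = re.sub(r'\\+"', '"', line)
            hits = IMAGE_FIELD.findall(flat) or BACKOFF_IMAGE.findall(flat)
            counts.update(hits)
    return counts

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"
    for image, count in tally(path).most_common():
        print(f"{count:5d}  {image}")

Run against this section it should surface only the two openstack images (openstack-cloudkitty-api and openstack-ceilometer-central), both on the expired current-tested tag.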
Dec 09 18:03:08 crc kubenswrapper[4840]: I1209 18:03:08.431339 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:08 crc kubenswrapper[4840]: I1209 18:03:08.431929 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:09 crc kubenswrapper[4840]: I1209 18:03:09.478408 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f6k87" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="registry-server" probeResult="failure" output=<
Dec 09 18:03:09 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 18:03:09 crc kubenswrapper[4840]: >
Dec 09 18:03:15 crc kubenswrapper[4840]: E1209 18:03:15.611693 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:03:16 crc kubenswrapper[4840]: E1209 18:03:16.612168 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:03:18 crc kubenswrapper[4840]: I1209 18:03:18.510056 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:18 crc kubenswrapper[4840]: I1209 18:03:18.586460 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:18 crc kubenswrapper[4840]: I1209 18:03:18.759484 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"]
Dec 09 18:03:19 crc kubenswrapper[4840]: I1209 18:03:19.670815 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f6k87" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="registry-server" containerID="cri-o://2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da" gracePeriod=2
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.350625 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.446868 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities\") pod \"ffc3be5e-112c-457b-a5b9-8f8708649df4\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") "
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.447273 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4sdp\" (UniqueName: \"kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp\") pod \"ffc3be5e-112c-457b-a5b9-8f8708649df4\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") "
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.447340 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content\") pod \"ffc3be5e-112c-457b-a5b9-8f8708649df4\" (UID: \"ffc3be5e-112c-457b-a5b9-8f8708649df4\") "
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.449853 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities" (OuterVolumeSpecName: "utilities") pod "ffc3be5e-112c-457b-a5b9-8f8708649df4" (UID: "ffc3be5e-112c-457b-a5b9-8f8708649df4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.455273 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp" (OuterVolumeSpecName: "kube-api-access-x4sdp") pod "ffc3be5e-112c-457b-a5b9-8f8708649df4" (UID: "ffc3be5e-112c-457b-a5b9-8f8708649df4"). InnerVolumeSpecName "kube-api-access-x4sdp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.549986 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.550026 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4sdp\" (UniqueName: \"kubernetes.io/projected/ffc3be5e-112c-457b-a5b9-8f8708649df4-kube-api-access-x4sdp\") on node \"crc\" DevicePath \"\""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.578938 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ffc3be5e-112c-457b-a5b9-8f8708649df4" (UID: "ffc3be5e-112c-457b-a5b9-8f8708649df4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.652484 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffc3be5e-112c-457b-a5b9-8f8708649df4-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.683368 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerID="2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da" exitCode=0
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.683418 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerDied","Data":"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"}
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.683549 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f6k87" event={"ID":"ffc3be5e-112c-457b-a5b9-8f8708649df4","Type":"ContainerDied","Data":"fd568305daa43c2695640a6db286d1470faf93dc0fd7df1b23c8ecc2177b8a2f"}
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.683574 4840 scope.go:117] "RemoveContainer" containerID="2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.683721 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f6k87"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.713899 4840 scope.go:117] "RemoveContainer" containerID="b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.724028 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"]
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.735112 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f6k87"]
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.735234 4840 scope.go:117] "RemoveContainer" containerID="7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.785059 4840 scope.go:117] "RemoveContainer" containerID="2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"
Dec 09 18:03:20 crc kubenswrapper[4840]: E1209 18:03:20.785661 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da\": container with ID starting with 2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da not found: ID does not exist" containerID="2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.785697 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da"} err="failed to get container status \"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da\": rpc error: code = NotFound desc = could not find container \"2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da\": container with ID starting with 2d78134b0acf639ca5b6ada55ae69505ce1e0841149447f4f63215466d36d9da not found: ID does not exist"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.785723 4840 scope.go:117] "RemoveContainer" containerID="b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"
Dec 09 18:03:20 crc kubenswrapper[4840]: E1209 18:03:20.786064 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7\": container with ID starting with b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7 not found: ID does not exist" containerID="b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.786093 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7"} err="failed to get container status \"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7\": rpc error: code = NotFound desc = could not find container \"b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7\": container with ID starting with b0dabd37294f25ef8b6723accd7ef83a0833c7b4e4225eb82f9d0d6d8f511ac7 not found: ID does not exist"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.786110 4840 scope.go:117] "RemoveContainer" containerID="7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423"
Dec 09 18:03:20 crc kubenswrapper[4840]: E1209 18:03:20.786423 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423\": container with ID starting with 7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423 not found: ID does not exist" containerID="7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423"
Dec 09 18:03:20 crc kubenswrapper[4840]: I1209 18:03:20.786454 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423"} err="failed to get container status \"7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423\": rpc error: code = NotFound desc = could not find container \"7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423\": container with ID starting with 7151710c768d0b886f69cf27364deff57df7a04b4478d6f117d0eeddc66fe423 not found: ID does not exist"
Dec 09 18:03:22 crc kubenswrapper[4840]: I1209 18:03:22.711209 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" path="/var/lib/kubelet/pods/ffc3be5e-112c-457b-a5b9-8f8708649df4/volumes"
Dec 09 18:03:28 crc kubenswrapper[4840]: E1209 18:03:28.610471 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:03:29 crc kubenswrapper[4840]: E1209 18:03:29.609921 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:03:40 crc kubenswrapper[4840]: E1209 18:03:40.610879 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
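The "RemoveContainer" / "ContainerStatus from runtime service failed" / "DeleteContainer returned error" trio above repeats once per container of the deleted pod, and every error is NotFound: by the time the kubelet re-issues the delete, CRI-O has already removed the container, so these entries read as a benign cleanup race rather than a real fault. A small grouping pass makes the pattern explicit; same layout assumptions and placeholder path as the previous sketch, and the "already gone"/"removed" labels are illustrative, not kubelet terminology.

#!/usr/bin/env python3
"""Group container-removal records by container ID.

A minimal sketch under the same assumptions as the previous snippet;
pairing "RemoveContainer" entries with later NotFound errors makes the
already-removed cases easy to spot.
"""
import re
import sys
from collections import defaultdict

REMOVE = re.compile(r'"RemoveContainer" containerID="([0-9a-f]{64})"')
NOT_FOUND = re.compile(r'could not find container \\+"([0-9a-f]{64})')

def removal_report(path):
    events = defaultdict(list)
    with open(path, errors="replace") as fh:
        for line in fh:
            for pattern, tag in ((REMOVE, "remove"), (NOT_FOUND, "not-found")):
                match = pattern.search(line)
                if match:
                    events[match.group(1)].append(tag)
    for cid, seq in events.items():
        verdict = "already gone" if "not-found" in seq else "removed"
        print(f"{cid[:12]}  {verdict}  ({' -> '.join(seq)})")

if __name__ == "__main__":
    removal_report(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log")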
Dec 09 18:03:41 crc kubenswrapper[4840]: E1209 18:03:41.730790 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:03:41 crc kubenswrapper[4840]: E1209 18:03:41.731150 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:03:41 crc kubenswrapper[4840]: E1209 18:03:41.731278 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 18:03:41 crc kubenswrapper[4840]: E1209 18:03:41.733815 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:03:54 crc kubenswrapper[4840]: E1209 18:03:54.626555 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:03:56 crc kubenswrapper[4840]: E1209 18:03:56.611943 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:04:04 crc kubenswrapper[4840]: I1209 18:04:04.035953 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:04:04 crc kubenswrapper[4840]: I1209 18:04:04.036493 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:04:05 crc kubenswrapper[4840]: E1209 18:04:05.611497 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:04:10 crc kubenswrapper[4840]: E1209 18:04:10.625243 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.042789 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"]
Dec 09 18:04:19 crc kubenswrapper[4840]: E1209 18:04:19.044187 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="extract-utilities"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.044210 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="extract-utilities"
Dec 09 18:04:19 crc kubenswrapper[4840]: E1209 18:04:19.044231 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="extract-content"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.044246 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="extract-content"
Dec 09 18:04:19 crc kubenswrapper[4840]: E1209 18:04:19.044302 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="registry-server"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.044315 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="registry-server"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.044683 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc3be5e-112c-457b-a5b9-8f8708649df4" containerName="registry-server"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.045932 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.048238 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.049255 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.050257 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.056424 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.057106 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"]
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.237369 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.237925 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jkld\" (UniqueName: \"kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.238057 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.340540 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.340745 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jkld\" (UniqueName: \"kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.340836 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.348027 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.349007 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.374213 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jkld\" (UniqueName: \"kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-77shc\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.385268 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:04:19 crc kubenswrapper[4840]: E1209 18:04:19.612286 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:04:19 crc kubenswrapper[4840]: I1209 18:04:19.975348 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"]
Dec 09 18:04:20 crc kubenswrapper[4840]: I1209 18:04:20.371663 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc" event={"ID":"38054200-bff9-439b-a60f-ff6f3b8926f0","Type":"ContainerStarted","Data":"8b325d7573dbb16b89b61d5a9047b6cffb6b70bce4ca1babaa094914e7713ce3"}
Dec 09 18:04:21 crc kubenswrapper[4840]: I1209 18:04:21.381259 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc" event={"ID":"38054200-bff9-439b-a60f-ff6f3b8926f0","Type":"ContainerStarted","Data":"af432df564e475ab6d094b31aa44a5698e2403f75aedea6850465e37efa86977"}
Dec 09 18:04:21 crc kubenswrapper[4840]: I1209 18:04:21.413217 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc" podStartSLOduration=1.918949435 podStartE2EDuration="2.41319549s" podCreationTimestamp="2025-12-09 18:04:19 +0000 UTC" firstStartedPulling="2025-12-09 18:04:19.971428621 +0000 UTC m=+4045.962539264" lastFinishedPulling="2025-12-09 18:04:20.465674656 +0000 UTC m=+4046.456785319" observedRunningTime="2025-12-09 18:04:21.413161159 +0000 UTC m=+4047.404271792" watchObservedRunningTime="2025-12-09 18:04:21.41319549 +0000 UTC m=+4047.404306133"
Dec 09 18:04:25 crc kubenswrapper[4840]: E1209 18:04:25.612600 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:04:32 crc kubenswrapper[4840]: E1209 18:04:32.613343 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:04:34 crc kubenswrapper[4840]: I1209 18:04:34.036385 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:04:34 crc kubenswrapper[4840]: I1209 18:04:34.037097 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:04:36 crc kubenswrapper[4840]: E1209 18:04:36.615635 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:04:45 crc kubenswrapper[4840]: E1209 18:04:45.610890 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:04:51 crc kubenswrapper[4840]: E1209 18:04:51.610294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:04:56 crc kubenswrapper[4840]: E1209 18:04:56.611491 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
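The long ImagePullBackOff runs above are interleaved with normal pod churn, and the pod_startup_latency_tracker records carry the useful timings inline: podStartSLOduration is logged as bare seconds, podStartE2EDuration as a quoted Go duration. A minimal extractor under those assumptions (placeholder path as before):

#!/usr/bin/env python3
"""Extract pod startup durations from the latency-tracker records.

A minimal sketch: assumes the field layout of the "Observed pod
startup duration" entries above; "kubelet.log" is a placeholder path.
"""
import re
import sys

PATTERN = re.compile(
    r'"Observed pod startup duration" pod="([^"]+)"'
    r' podStartSLOduration=([0-9.]+)'
    r' podStartE2EDuration="([^"]+)"'
)

def startup_durations(path):
    with open(path, errors="replace") as fh:
        for line in fh:
            match = PATTERN.search(line)
            if match:
                yield match.group(1), float(match.group(2)), match.group(3)

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"
    for pod, slo_seconds, e2e in startup_durations(path):
        print(f"{slo_seconds:10.3f}s SLO  {e2e:>14} e2e  {pod}")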
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.041769 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.041846 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.043299 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.043432 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e" gracePeriod=600 Dec 09 18:05:04 crc kubenswrapper[4840]: E1209 18:05:04.619335 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.905260 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e" exitCode=0 Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.905348 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e"} Dec 09 18:05:04 crc kubenswrapper[4840]: I1209 18:05:04.905596 4840 scope.go:117] "RemoveContainer" containerID="beca0881441a96fa8dcdf5f85cb0ce28d5db2383ec4231c99f360eee4d1321bc" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.372834 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"] Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.376110 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.390884 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"] Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.526339 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.526472 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndmjr\" (UniqueName: \"kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.526574 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.629043 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.629153 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndmjr\" (UniqueName: \"kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.629243 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.629719 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.629858 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.654918 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ndmjr\" (UniqueName: \"kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr\") pod \"redhat-marketplace-99j4p\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") " pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.705001 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99j4p" Dec 09 18:05:05 crc kubenswrapper[4840]: I1209 18:05:05.917782 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"} Dec 09 18:05:06 crc kubenswrapper[4840]: I1209 18:05:06.236727 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"] Dec 09 18:05:06 crc kubenswrapper[4840]: I1209 18:05:06.930402 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerStarted","Data":"aa8e335f2d14409ebfa3ee64c7fcb9c3e31f2040e9656c201e7c0857e0d532f7"} Dec 09 18:05:07 crc kubenswrapper[4840]: E1209 18:05:07.610674 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:05:07 crc kubenswrapper[4840]: I1209 18:05:07.940242 4840 generic.go:334] "Generic (PLEG): container finished" podID="9391e6fe-c455-4704-9a69-36d1d0392886" containerID="d0fbefe063e2f97a29dd6b7afe216e66629e4ba8a8df35f691d4ec63fc4888be" exitCode=0 Dec 09 18:05:07 crc kubenswrapper[4840]: I1209 18:05:07.940283 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerDied","Data":"d0fbefe063e2f97a29dd6b7afe216e66629e4ba8a8df35f691d4ec63fc4888be"} Dec 09 18:05:09 crc kubenswrapper[4840]: I1209 18:05:09.964890 4840 generic.go:334] "Generic (PLEG): container finished" podID="9391e6fe-c455-4704-9a69-36d1d0392886" containerID="ef7e56f444ddcf96041c80edef220ac6b0c7739423c5b04ed40890d30a30ed6c" exitCode=0 Dec 09 18:05:09 crc kubenswrapper[4840]: I1209 18:05:09.964955 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerDied","Data":"ef7e56f444ddcf96041c80edef220ac6b0c7739423c5b04ed40890d30a30ed6c"} Dec 09 18:05:10 crc kubenswrapper[4840]: I1209 18:05:10.979821 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerStarted","Data":"ee84eab71323ce107ba2025b5fdc0be9cd3c7e5134aa115f7f7e01198d634e3f"} Dec 09 18:05:11 crc kubenswrapper[4840]: I1209 18:05:11.002632 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-99j4p" podStartSLOduration=3.58759401 podStartE2EDuration="6.002604001s" podCreationTimestamp="2025-12-09 18:05:05 +0000 UTC" firstStartedPulling="2025-12-09 18:05:07.944438965 
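Probe records account for much of the remaining traffic: the marketplace registry pods flip startup unhealthy, then started, then readiness ready within seconds of the registry-server container starting, while machine-config-daemon-kr6l2 fails its liveness probe repeatedly until the kubelet kills the container (gracePeriod=600 above) and a replacement starts. A summary pass over both probe record shapes, under the same assumptions and placeholder path as the earlier sketches:

#!/usr/bin/env python3
"""Summarize probe activity per pod.

A minimal sketch over the "SyncLoop (probe)" and "Probe failed"
records above; it only counts transitions, it does not interpret them.
"""
import re
import sys
from collections import Counter

SYNC = re.compile(r'"SyncLoop \(probe\)" probe="(\w+)" status="(\w*)" pod="([^"]+)"')
FAIL = re.compile(r'"Probe failed" probeType="(\w+)" pod="([^"]+)"')

def probe_summary(path):
    counts = Counter()
    with open(path, errors="replace") as fh:
        for line in fh:
            match = SYNC.search(line)
            if match:
                probe, status, pod = match.groups()
                counts[(pod, probe, status or "cleared")] += 1
                continue
            match = FAIL.search(line)
            if match:
                counts[(match.group(2), match.group(1).lower(), "failed")] += 1
    return counts

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"
    for (pod, probe, status), count in sorted(probe_summary(path).items()):
        print(f"{count:4d}  {probe:9s} {status:9s} {pod}")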
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.378943 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"]
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.391302 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"]
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.391434 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.516779 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nvx8\" (UniqueName: \"kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.517139 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.517271 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.620938 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nvx8\" (UniqueName: \"kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.621130 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.621290 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.621637 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.621742 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.640412 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nvx8\" (UniqueName: \"kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8\") pod \"certified-operators-l2z6q\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:13 crc kubenswrapper[4840]: I1209 18:05:13.718531 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:14 crc kubenswrapper[4840]: I1209 18:05:14.188280 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"]
Dec 09 18:05:14 crc kubenswrapper[4840]: W1209 18:05:14.195108 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ed9f51f_60ed_490f_9c7b_6bd184eeebb3.slice/crio-7cd3f6764a4f8cccd1de660292c65e9cb81cf28de295716ffc727764ee92fc1d WatchSource:0}: Error finding container 7cd3f6764a4f8cccd1de660292c65e9cb81cf28de295716ffc727764ee92fc1d: Status 404 returned error can't find the container with id 7cd3f6764a4f8cccd1de660292c65e9cb81cf28de295716ffc727764ee92fc1d
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.022564 4840 generic.go:334] "Generic (PLEG): container finished" podID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerID="133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b" exitCode=0
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.022888 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerDied","Data":"133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b"}
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.022917 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerStarted","Data":"7cd3f6764a4f8cccd1de660292c65e9cb81cf28de295716ffc727764ee92fc1d"}
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.705369 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.705733 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:15 crc kubenswrapper[4840]: I1209 18:05:15.764980 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:16 crc kubenswrapper[4840]: I1209 18:05:16.551158 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:17 crc kubenswrapper[4840]: I1209 18:05:17.044379 4840 generic.go:334] "Generic (PLEG): container finished" podID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerID="e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf" exitCode=0
Dec 09 18:05:17 crc kubenswrapper[4840]: I1209 18:05:17.045543 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerDied","Data":"e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf"}
Dec 09 18:05:18 crc kubenswrapper[4840]: I1209 18:05:18.058948 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerStarted","Data":"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540"}
Dec 09 18:05:18 crc kubenswrapper[4840]: I1209 18:05:18.085088 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"]
Dec 09 18:05:18 crc kubenswrapper[4840]: I1209 18:05:18.085801 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-99j4p" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="registry-server" containerID="cri-o://ee84eab71323ce107ba2025b5fdc0be9cd3c7e5134aa115f7f7e01198d634e3f" gracePeriod=2
Dec 09 18:05:18 crc kubenswrapper[4840]: I1209 18:05:18.091143 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l2z6q" podStartSLOduration=2.620193718 podStartE2EDuration="5.091125486s" podCreationTimestamp="2025-12-09 18:05:13 +0000 UTC" firstStartedPulling="2025-12-09 18:05:15.028591024 +0000 UTC m=+4101.019701657" lastFinishedPulling="2025-12-09 18:05:17.499522792 +0000 UTC m=+4103.490633425" observedRunningTime="2025-12-09 18:05:18.080343736 +0000 UTC m=+4104.071454379" watchObservedRunningTime="2025-12-09 18:05:18.091125486 +0000 UTC m=+4104.082236119"
Dec 09 18:05:18 crc kubenswrapper[4840]: E1209 18:05:18.610310 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:05:18 crc kubenswrapper[4840]: E1209 18:05:18.610720 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.075656 4840 generic.go:334] "Generic (PLEG): container finished" podID="9391e6fe-c455-4704-9a69-36d1d0392886" containerID="ee84eab71323ce107ba2025b5fdc0be9cd3c7e5134aa115f7f7e01198d634e3f" exitCode=0
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.075742 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerDied","Data":"ee84eab71323ce107ba2025b5fdc0be9cd3c7e5134aa115f7f7e01198d634e3f"}
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.076009 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99j4p" event={"ID":"9391e6fe-c455-4704-9a69-36d1d0392886","Type":"ContainerDied","Data":"aa8e335f2d14409ebfa3ee64c7fcb9c3e31f2040e9656c201e7c0857e0d532f7"}
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.076032 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa8e335f2d14409ebfa3ee64c7fcb9c3e31f2040e9656c201e7c0857e0d532f7"
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.152357 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.348543 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndmjr\" (UniqueName: \"kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr\") pod \"9391e6fe-c455-4704-9a69-36d1d0392886\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") "
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.349574 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content\") pod \"9391e6fe-c455-4704-9a69-36d1d0392886\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") "
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.349647 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities\") pod \"9391e6fe-c455-4704-9a69-36d1d0392886\" (UID: \"9391e6fe-c455-4704-9a69-36d1d0392886\") "
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.350588 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities" (OuterVolumeSpecName: "utilities") pod "9391e6fe-c455-4704-9a69-36d1d0392886" (UID: "9391e6fe-c455-4704-9a69-36d1d0392886"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.354228 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr" (OuterVolumeSpecName: "kube-api-access-ndmjr") pod "9391e6fe-c455-4704-9a69-36d1d0392886" (UID: "9391e6fe-c455-4704-9a69-36d1d0392886"). InnerVolumeSpecName "kube-api-access-ndmjr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.373069 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9391e6fe-c455-4704-9a69-36d1d0392886" (UID: "9391e6fe-c455-4704-9a69-36d1d0392886"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.451787 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndmjr\" (UniqueName: \"kubernetes.io/projected/9391e6fe-c455-4704-9a69-36d1d0392886-kube-api-access-ndmjr\") on node \"crc\" DevicePath \"\""
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.451854 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 18:05:19 crc kubenswrapper[4840]: I1209 18:05:19.451867 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9391e6fe-c455-4704-9a69-36d1d0392886-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 18:05:20 crc kubenswrapper[4840]: I1209 18:05:20.096742 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99j4p"
Dec 09 18:05:20 crc kubenswrapper[4840]: I1209 18:05:20.139521 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"]
Dec 09 18:05:20 crc kubenswrapper[4840]: I1209 18:05:20.148708 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-99j4p"]
Dec 09 18:05:20 crc kubenswrapper[4840]: I1209 18:05:20.621553 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" path="/var/lib/kubelet/pods/9391e6fe-c455-4704-9a69-36d1d0392886/volumes"
Dec 09 18:05:23 crc kubenswrapper[4840]: I1209 18:05:23.718891 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:23 crc kubenswrapper[4840]: I1209 18:05:23.719370 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:23 crc kubenswrapper[4840]: I1209 18:05:23.769698 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:24 crc kubenswrapper[4840]: I1209 18:05:24.185974 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l2z6q"
Dec 09 18:05:24 crc kubenswrapper[4840]: I1209 18:05:24.878845 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"]
Dec 09 18:05:26 crc kubenswrapper[4840]: I1209 18:05:26.160150 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l2z6q" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="registry-server" containerID="cri-o://165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540" gracePeriod=2
Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.149466 4840 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-l2z6q" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.174044 4840 generic.go:334] "Generic (PLEG): container finished" podID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerID="165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540" exitCode=0 Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.174103 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerDied","Data":"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540"} Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.174144 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2z6q" event={"ID":"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3","Type":"ContainerDied","Data":"7cd3f6764a4f8cccd1de660292c65e9cb81cf28de295716ffc727764ee92fc1d"} Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.174176 4840 scope.go:117] "RemoveContainer" containerID="165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.174399 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2z6q" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.197811 4840 scope.go:117] "RemoveContainer" containerID="e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.233501 4840 scope.go:117] "RemoveContainer" containerID="133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.271802 4840 scope.go:117] "RemoveContainer" containerID="165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540" Dec 09 18:05:27 crc kubenswrapper[4840]: E1209 18:05:27.272920 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540\": container with ID starting with 165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540 not found: ID does not exist" containerID="165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.273031 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540"} err="failed to get container status \"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540\": rpc error: code = NotFound desc = could not find container \"165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540\": container with ID starting with 165ee1986a145a11e8866193389793a5f25bb2e3de5e7c2cc643cde536c15540 not found: ID does not exist" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.273065 4840 scope.go:117] "RemoveContainer" containerID="e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf" Dec 09 18:05:27 crc kubenswrapper[4840]: E1209 18:05:27.273522 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf\": container with ID starting with e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf not found: ID does not exist" 
containerID="e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.273557 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf"} err="failed to get container status \"e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf\": rpc error: code = NotFound desc = could not find container \"e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf\": container with ID starting with e144bec6903bc177b5827407bf2304267283c7f1b3948c12d5c8f52906cf3ddf not found: ID does not exist" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.273573 4840 scope.go:117] "RemoveContainer" containerID="133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b" Dec 09 18:05:27 crc kubenswrapper[4840]: E1209 18:05:27.273919 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b\": container with ID starting with 133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b not found: ID does not exist" containerID="133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.274053 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b"} err="failed to get container status \"133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b\": rpc error: code = NotFound desc = could not find container \"133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b\": container with ID starting with 133837f41fbb99243dd1b0d6f42306be25f3e98dc1839f01b88f80206e5e7b3b not found: ID does not exist" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.329079 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nvx8\" (UniqueName: \"kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8\") pod \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.329439 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities\") pod \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.329559 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content\") pod \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\" (UID: \"9ed9f51f-60ed-490f-9c7b-6bd184eeebb3\") " Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.330270 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities" (OuterVolumeSpecName: "utilities") pod "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" (UID: "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.335782 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8" (OuterVolumeSpecName: "kube-api-access-5nvx8") pod "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" (UID: "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3"). InnerVolumeSpecName "kube-api-access-5nvx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.388555 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" (UID: "9ed9f51f-60ed-490f-9c7b-6bd184eeebb3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.432542 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nvx8\" (UniqueName: \"kubernetes.io/projected/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-kube-api-access-5nvx8\") on node \"crc\" DevicePath \"\"" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.432581 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.432590 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.533603 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"] Dec 09 18:05:27 crc kubenswrapper[4840]: I1209 18:05:27.549937 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l2z6q"] Dec 09 18:05:28 crc kubenswrapper[4840]: I1209 18:05:28.627852 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" path="/var/lib/kubelet/pods/9ed9f51f-60ed-490f-9c7b-6bd184eeebb3/volumes" Dec 09 18:05:29 crc kubenswrapper[4840]: E1209 18:05:29.610399 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:05:30 crc kubenswrapper[4840]: E1209 18:05:30.611548 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:05:42 crc kubenswrapper[4840]: E1209 18:05:42.610846 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" 
podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:05:43 crc kubenswrapper[4840]: E1209 18:05:43.760323 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.850522 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.853077 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="extract-content" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.853243 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="extract-content" Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.853413 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="extract-utilities" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.853536 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="extract-utilities" Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.853668 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="extract-content" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.853804 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="extract-content" Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.853952 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.854183 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.854313 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="extract-utilities" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.854432 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="extract-utilities" Dec 09 18:05:52 crc kubenswrapper[4840]: E1209 18:05:52.854566 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.854691 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.855237 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ed9f51f-60ed-490f-9c7b-6bd184eeebb3" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.855409 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9391e6fe-c455-4704-9a69-36d1d0392886" containerName="registry-server" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.861055 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.893785 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.915661 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.915829 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:52 crc kubenswrapper[4840]: I1209 18:05:52.915894 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmjbb\" (UniqueName: \"kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.017211 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.017288 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.017328 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmjbb\" (UniqueName: \"kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.017802 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.017844 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.037239 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hmjbb\" (UniqueName: \"kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb\") pod \"community-operators-j27ng\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.181956 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:05:53 crc kubenswrapper[4840]: W1209 18:05:53.805746 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffe2d4b9_a4b6_40b7_a2f8_9d47e1588b7d.slice/crio-e546abed67e374de42c3aa4636e7c8f99452b4f0c38f567e57e21cbe5cfee89c WatchSource:0}: Error finding container e546abed67e374de42c3aa4636e7c8f99452b4f0c38f567e57e21cbe5cfee89c: Status 404 returned error can't find the container with id e546abed67e374de42c3aa4636e7c8f99452b4f0c38f567e57e21cbe5cfee89c Dec 09 18:05:53 crc kubenswrapper[4840]: I1209 18:05:53.813140 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:05:54 crc kubenswrapper[4840]: I1209 18:05:54.470189 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerID="5c31cf28d6fda857bfea24008c8af636e79445277dda98d08b42f0d4793d0d64" exitCode=0 Dec 09 18:05:54 crc kubenswrapper[4840]: I1209 18:05:54.470287 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerDied","Data":"5c31cf28d6fda857bfea24008c8af636e79445277dda98d08b42f0d4793d0d64"} Dec 09 18:05:54 crc kubenswrapper[4840]: I1209 18:05:54.470560 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerStarted","Data":"e546abed67e374de42c3aa4636e7c8f99452b4f0c38f567e57e21cbe5cfee89c"} Dec 09 18:05:55 crc kubenswrapper[4840]: I1209 18:05:55.513114 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerStarted","Data":"d055c98dc8bc42962243ef4f957625bb23da1c9c6777b3b48d4a7bd47a2d6a6f"} Dec 09 18:05:56 crc kubenswrapper[4840]: I1209 18:05:56.524389 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerID="d055c98dc8bc42962243ef4f957625bb23da1c9c6777b3b48d4a7bd47a2d6a6f" exitCode=0 Dec 09 18:05:56 crc kubenswrapper[4840]: I1209 18:05:56.524446 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerDied","Data":"d055c98dc8bc42962243ef4f957625bb23da1c9c6777b3b48d4a7bd47a2d6a6f"} Dec 09 18:05:56 crc kubenswrapper[4840]: E1209 18:05:56.610348 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:05:57 crc kubenswrapper[4840]: I1209 18:05:57.536416 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerStarted","Data":"549f2ad7861e745fd5158b8d2790d6f8f9b54b617027a3db9dbfaded0b1860d8"} Dec 09 18:05:57 crc kubenswrapper[4840]: I1209 18:05:57.561158 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j27ng" podStartSLOduration=3.068118411 podStartE2EDuration="5.561139624s" podCreationTimestamp="2025-12-09 18:05:52 +0000 UTC" firstStartedPulling="2025-12-09 18:05:54.471764461 +0000 UTC m=+4140.462875094" lastFinishedPulling="2025-12-09 18:05:56.964785644 +0000 UTC m=+4142.955896307" observedRunningTime="2025-12-09 18:05:57.555559174 +0000 UTC m=+4143.546669817" watchObservedRunningTime="2025-12-09 18:05:57.561139624 +0000 UTC m=+4143.552250257" Dec 09 18:05:57 crc kubenswrapper[4840]: E1209 18:05:57.612062 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:06:03 crc kubenswrapper[4840]: I1209 18:06:03.182211 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:03 crc kubenswrapper[4840]: I1209 18:06:03.182795 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:03 crc kubenswrapper[4840]: I1209 18:06:03.238160 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:03 crc kubenswrapper[4840]: I1209 18:06:03.642057 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:03 crc kubenswrapper[4840]: I1209 18:06:03.696247 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:06:05 crc kubenswrapper[4840]: I1209 18:06:05.611084 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j27ng" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="registry-server" containerID="cri-o://549f2ad7861e745fd5158b8d2790d6f8f9b54b617027a3db9dbfaded0b1860d8" gracePeriod=2 Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.622238 4840 generic.go:334] "Generic (PLEG): container finished" podID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerID="549f2ad7861e745fd5158b8d2790d6f8f9b54b617027a3db9dbfaded0b1860d8" exitCode=0 Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.622303 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerDied","Data":"549f2ad7861e745fd5158b8d2790d6f8f9b54b617027a3db9dbfaded0b1860d8"} Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.730613 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.851491 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmjbb\" (UniqueName: \"kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb\") pod \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.851631 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities\") pod \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.851655 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content\") pod \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\" (UID: \"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d\") " Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.856284 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities" (OuterVolumeSpecName: "utilities") pod "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" (UID: "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.864912 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb" (OuterVolumeSpecName: "kube-api-access-hmjbb") pod "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" (UID: "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d"). InnerVolumeSpecName "kube-api-access-hmjbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.913304 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" (UID: "ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.954390 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.954435 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:06:06 crc kubenswrapper[4840]: I1209 18:06:06.954453 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmjbb\" (UniqueName: \"kubernetes.io/projected/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d-kube-api-access-hmjbb\") on node \"crc\" DevicePath \"\"" Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.633752 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j27ng" event={"ID":"ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d","Type":"ContainerDied","Data":"e546abed67e374de42c3aa4636e7c8f99452b4f0c38f567e57e21cbe5cfee89c"} Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.634120 4840 scope.go:117] "RemoveContainer" containerID="549f2ad7861e745fd5158b8d2790d6f8f9b54b617027a3db9dbfaded0b1860d8" Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.633828 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j27ng" Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.662127 4840 scope.go:117] "RemoveContainer" containerID="d055c98dc8bc42962243ef4f957625bb23da1c9c6777b3b48d4a7bd47a2d6a6f" Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.705101 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.715358 4840 scope.go:117] "RemoveContainer" containerID="5c31cf28d6fda857bfea24008c8af636e79445277dda98d08b42f0d4793d0d64" Dec 09 18:06:07 crc kubenswrapper[4840]: I1209 18:06:07.728735 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j27ng"] Dec 09 18:06:08 crc kubenswrapper[4840]: E1209 18:06:08.611856 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:06:08 crc kubenswrapper[4840]: I1209 18:06:08.630617 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" path="/var/lib/kubelet/pods/ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d/volumes" Dec 09 18:06:10 crc kubenswrapper[4840]: E1209 18:06:10.612126 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:06:19 crc kubenswrapper[4840]: E1209 18:06:19.612373 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:06:21 crc kubenswrapper[4840]: E1209 18:06:21.610560 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:06:32 crc kubenswrapper[4840]: E1209 18:06:32.611038 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:06:34 crc kubenswrapper[4840]: E1209 18:06:34.620752 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:06:47 crc kubenswrapper[4840]: E1209 18:06:47.611115 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:06:47 crc kubenswrapper[4840]: E1209 18:06:47.611538 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:07:00 crc kubenswrapper[4840]: E1209 18:07:00.610847 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:07:01 crc kubenswrapper[4840]: E1209 18:07:01.611628 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:07:04 crc kubenswrapper[4840]: I1209 18:07:04.036399 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:07:04 crc kubenswrapper[4840]: I1209 18:07:04.036838 4840 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:07:12 crc kubenswrapper[4840]: E1209 18:07:12.611210 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:07:13 crc kubenswrapper[4840]: E1209 18:07:13.630286 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:07:23 crc kubenswrapper[4840]: E1209 18:07:23.612621 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:07:28 crc kubenswrapper[4840]: E1209 18:07:28.612133 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:07:34 crc kubenswrapper[4840]: I1209 18:07:34.036016 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:07:34 crc kubenswrapper[4840]: I1209 18:07:34.036433 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:07:35 crc kubenswrapper[4840]: E1209 18:07:35.612637 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:07:40 crc kubenswrapper[4840]: E1209 18:07:40.612299 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:07:47 crc kubenswrapper[4840]: E1209 18:07:47.611143 4840 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:07:51 crc kubenswrapper[4840]: E1209 18:07:51.613759 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:08:01 crc kubenswrapper[4840]: E1209 18:08:01.612918 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.036935 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.037304 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.037355 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.038270 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.038343 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" gracePeriod=600 Dec 09 18:08:04 crc kubenswrapper[4840]: E1209 18:08:04.703210 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.914473 4840 generic.go:334] "Generic (PLEG): container 
finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" exitCode=0 Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.914534 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"} Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.914576 4840 scope.go:117] "RemoveContainer" containerID="69d5991ffa1b4237f2cfce739eb1afc218c8c538da38015d667538aeea0a385e" Dec 09 18:08:04 crc kubenswrapper[4840]: I1209 18:08:04.915389 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:08:04 crc kubenswrapper[4840]: E1209 18:08:04.915773 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:08:06 crc kubenswrapper[4840]: I1209 18:08:06.613481 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:08:06 crc kubenswrapper[4840]: E1209 18:08:06.743661 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:08:06 crc kubenswrapper[4840]: E1209 18:08:06.743771 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:08:06 crc kubenswrapper[4840]: E1209 18:08:06.744013 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:08:06 crc kubenswrapper[4840]: E1209 18:08:06.745407 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:08:15 crc kubenswrapper[4840]: E1209 18:08:15.612223 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:08:17 crc kubenswrapper[4840]: I1209 18:08:17.610623 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:08:17 crc kubenswrapper[4840]: E1209 18:08:17.611686 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:08:21 crc kubenswrapper[4840]: E1209 18:08:21.613010 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:08:28 crc kubenswrapper[4840]: E1209 18:08:28.611407 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:08:32 crc kubenswrapper[4840]: I1209 18:08:32.609359 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:08:32 crc kubenswrapper[4840]: E1209 18:08:32.610436 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:08:34 crc kubenswrapper[4840]: E1209 18:08:34.625193 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:08:39 crc kubenswrapper[4840]: E1209 18:08:39.611981 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:08:45 crc 
kubenswrapper[4840]: I1209 18:08:45.609600 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:08:45 crc kubenswrapper[4840]: E1209 18:08:45.610750 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:08:48 crc kubenswrapper[4840]: E1209 18:08:48.612276 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:08:51 crc kubenswrapper[4840]: E1209 18:08:51.734767 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:08:51 crc kubenswrapper[4840]: E1209 18:08:51.737311 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:08:51 crc kubenswrapper[4840]: E1209 18:08:51.737595 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:08:51 crc kubenswrapper[4840]: E1209 18:08:51.738932 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:09:00 crc kubenswrapper[4840]: I1209 18:09:00.608928 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:09:00 crc kubenswrapper[4840]: E1209 18:09:00.609633 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:09:02 crc kubenswrapper[4840]: E1209 18:09:02.610741 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:09:07 crc kubenswrapper[4840]: E1209 18:09:07.610874 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:09:14 crc kubenswrapper[4840]: I1209 18:09:14.637871 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:09:14 crc kubenswrapper[4840]: E1209 18:09:14.638797 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:09:16 crc kubenswrapper[4840]: E1209 18:09:16.610634 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:09:22 crc kubenswrapper[4840]: E1209 18:09:22.610748 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:09:27 crc kubenswrapper[4840]: I1209 18:09:27.609299 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:09:27 crc kubenswrapper[4840]: E1209 18:09:27.610149 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Dec 09 18:09:29 crc kubenswrapper[4840]: E1209 18:09:29.610583 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:09:35 crc kubenswrapper[4840]: E1209 18:09:35.611387 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:09:39 crc kubenswrapper[4840]: I1209 18:09:39.609636 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:09:39 crc kubenswrapper[4840]: E1209 18:09:39.610687 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:09:40 crc kubenswrapper[4840]: E1209 18:09:40.612231 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:09:50 crc kubenswrapper[4840]: I1209 18:09:50.614830 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:09:50 crc kubenswrapper[4840]: E1209 18:09:50.615511 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:09:51 crc kubenswrapper[4840]: E1209 18:09:51.611528 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:09:51 crc kubenswrapper[4840]: E1209 18:09:51.612014 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:10:02 crc kubenswrapper[4840]: E1209 18:10:02.610670 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:10:05 crc kubenswrapper[4840]: I1209 18:10:05.608008 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:10:05 crc kubenswrapper[4840]: E1209 18:10:05.609416 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:10:06 crc kubenswrapper[4840]: E1209 18:10:06.611880 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:10:15 crc kubenswrapper[4840]: E1209 18:10:15.611122 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:10:16 crc kubenswrapper[4840]: I1209 18:10:16.609164 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:10:16 crc kubenswrapper[4840]: E1209 18:10:16.609523 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:10:18 crc kubenswrapper[4840]: E1209 18:10:18.644210 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:10:27 crc kubenswrapper[4840]: E1209 18:10:27.610382 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:10:28 crc kubenswrapper[4840]: I1209 18:10:28.609794 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:10:28 crc kubenswrapper[4840]: E1209 18:10:28.610176 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:10:30 crc kubenswrapper[4840]: E1209 18:10:30.611947 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:10:42 crc kubenswrapper[4840]: I1209 18:10:42.608420 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:10:42 crc kubenswrapper[4840]: E1209 18:10:42.610135 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:10:42 crc kubenswrapper[4840]: E1209 18:10:42.610207 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:10:43 crc kubenswrapper[4840]: E1209 18:10:43.610156 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:10:45 crc kubenswrapper[4840]: I1209 18:10:45.790151 4840 generic.go:334] "Generic (PLEG): container finished" podID="38054200-bff9-439b-a60f-ff6f3b8926f0" containerID="af432df564e475ab6d094b31aa44a5698e2403f75aedea6850465e37efa86977" exitCode=2
Dec 09 18:10:45 crc kubenswrapper[4840]: I1209 18:10:45.790290 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc" event={"ID":"38054200-bff9-439b-a60f-ff6f3b8926f0","Type":"ContainerDied","Data":"af432df564e475ab6d094b31aa44a5698e2403f75aedea6850465e37efa86977"}
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.340905 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.401450 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory\") pod \"38054200-bff9-439b-a60f-ff6f3b8926f0\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") "
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.401831 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key\") pod \"38054200-bff9-439b-a60f-ff6f3b8926f0\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") "
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.402030 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jkld\" (UniqueName: \"kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld\") pod \"38054200-bff9-439b-a60f-ff6f3b8926f0\" (UID: \"38054200-bff9-439b-a60f-ff6f3b8926f0\") "
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.407359 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld" (OuterVolumeSpecName: "kube-api-access-2jkld") pod "38054200-bff9-439b-a60f-ff6f3b8926f0" (UID: "38054200-bff9-439b-a60f-ff6f3b8926f0"). InnerVolumeSpecName "kube-api-access-2jkld". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.434661 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory" (OuterVolumeSpecName: "inventory") pod "38054200-bff9-439b-a60f-ff6f3b8926f0" (UID: "38054200-bff9-439b-a60f-ff6f3b8926f0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.435209 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "38054200-bff9-439b-a60f-ff6f3b8926f0" (UID: "38054200-bff9-439b-a60f-ff6f3b8926f0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.504415 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.504447 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38054200-bff9-439b-a60f-ff6f3b8926f0-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.504457 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jkld\" (UniqueName: \"kubernetes.io/projected/38054200-bff9-439b-a60f-ff6f3b8926f0-kube-api-access-2jkld\") on node \"crc\" DevicePath \"\""
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.813656 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc" event={"ID":"38054200-bff9-439b-a60f-ff6f3b8926f0","Type":"ContainerDied","Data":"8b325d7573dbb16b89b61d5a9047b6cffb6b70bce4ca1babaa094914e7713ce3"}
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.813708 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b325d7573dbb16b89b61d5a9047b6cffb6b70bce4ca1babaa094914e7713ce3"
Dec 09 18:10:47 crc kubenswrapper[4840]: I1209 18:10:47.813738 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-77shc"
Dec 09 18:10:55 crc kubenswrapper[4840]: E1209 18:10:55.610993 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:10:57 crc kubenswrapper[4840]: I1209 18:10:57.609007 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:10:57 crc kubenswrapper[4840]: E1209 18:10:57.609660 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:10:57 crc kubenswrapper[4840]: E1209 18:10:57.610610 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:11:08 crc kubenswrapper[4840]: I1209 18:11:08.609261 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:11:08 crc kubenswrapper[4840]: E1209 18:11:08.610206 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:11:08 crc kubenswrapper[4840]: E1209 18:11:08.611520 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:11:09 crc kubenswrapper[4840]: E1209 18:11:09.610872 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:11:13 crc kubenswrapper[4840]: I1209 18:11:13.006238 4840 scope.go:117] "RemoveContainer" containerID="d0fbefe063e2f97a29dd6b7afe216e66629e4ba8a8df35f691d4ec63fc4888be"
Dec 09 18:11:13 crc kubenswrapper[4840]: I1209 18:11:13.040654 4840 scope.go:117] "RemoveContainer" containerID="ef7e56f444ddcf96041c80edef220ac6b0c7739423c5b04ed40890d30a30ed6c"
Dec 09 18:11:13 crc kubenswrapper[4840]: I1209 18:11:13.127186 4840 scope.go:117] "RemoveContainer" containerID="ee84eab71323ce107ba2025b5fdc0be9cd3c7e5134aa115f7f7e01198d634e3f"
Dec 09 18:11:21 crc kubenswrapper[4840]: I1209 18:11:21.610599 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:11:21 crc kubenswrapper[4840]: E1209 18:11:21.611577 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:11:21 crc kubenswrapper[4840]: E1209 18:11:21.611596 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:11:21 crc kubenswrapper[4840]: E1209 18:11:21.611668 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:11:33 crc kubenswrapper[4840]: E1209 18:11:33.612075 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:11:35 crc kubenswrapper[4840]: I1209 18:11:35.609200 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:11:35 crc kubenswrapper[4840]: E1209 18:11:35.609738 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:11:35 crc kubenswrapper[4840]: E1209 18:11:35.610831 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:11:46 crc kubenswrapper[4840]: E1209 18:11:46.612062 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:11:48 crc kubenswrapper[4840]: I1209 18:11:48.610141 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:11:48 crc kubenswrapper[4840]: E1209 18:11:48.610953 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:11:49 crc kubenswrapper[4840]: E1209 18:11:49.611204 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:11:58 crc kubenswrapper[4840]: E1209 18:11:58.611795 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:12:01 crc kubenswrapper[4840]: I1209 18:12:01.609856 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:12:01 crc kubenswrapper[4840]: E1209 18:12:01.611205 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:12:04 crc kubenswrapper[4840]: E1209 18:12:04.618940 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:12:09 crc kubenswrapper[4840]: E1209 18:12:09.611427 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:12:16 crc kubenswrapper[4840]: I1209 18:12:16.609458 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:12:16 crc kubenswrapper[4840]: E1209 18:12:16.610606 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:12:18 crc kubenswrapper[4840]: E1209 18:12:18.613036 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:12:23 crc kubenswrapper[4840]: E1209 18:12:23.612142 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:12:27 crc kubenswrapper[4840]: I1209 18:12:27.608435 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:12:27 crc kubenswrapper[4840]: E1209 18:12:27.609261 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:12:33 crc kubenswrapper[4840]: E1209 18:12:33.612879 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:12:38 crc kubenswrapper[4840]: E1209 18:12:38.612361 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:12:39 crc kubenswrapper[4840]: I1209 18:12:39.609896 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:12:39 crc kubenswrapper[4840]: E1209 18:12:39.610869 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:12:47 crc kubenswrapper[4840]: E1209 18:12:47.611516 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:12:52 crc kubenswrapper[4840]: E1209 18:12:52.611199 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:12:53 crc kubenswrapper[4840]: I1209 18:12:53.609034 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:12:53 crc kubenswrapper[4840]: E1209 18:12:53.609294 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:12:58 crc kubenswrapper[4840]: E1209 18:12:58.613599 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:13:06 crc kubenswrapper[4840]: I1209 18:13:06.608892 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141"
Dec 09 18:13:06 crc kubenswrapper[4840]: E1209 18:13:06.610451 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.370167 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5"}
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.761130 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"]
Dec 09 18:13:07 crc kubenswrapper[4840]: E1209 18:13:07.761986 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="extract-content"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762005 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="extract-content"
Dec 09 18:13:07 crc kubenswrapper[4840]: E1209 18:13:07.762028 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="registry-server"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762036 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="registry-server"
Dec 09 18:13:07 crc kubenswrapper[4840]: E1209 18:13:07.762069 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38054200-bff9-439b-a60f-ff6f3b8926f0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762081 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="38054200-bff9-439b-a60f-ff6f3b8926f0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 18:13:07 crc kubenswrapper[4840]: E1209 18:13:07.762100 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="extract-utilities"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762108 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="extract-utilities"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762375 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="38054200-bff9-439b-a60f-ff6f3b8926f0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.762412 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe2d4b9-a4b6-40b7-a2f8-9d47e1588b7d" containerName="registry-server"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.766890 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.770675 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"]
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.902205 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fml5b\" (UniqueName: \"kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.902275 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:07 crc kubenswrapper[4840]: I1209 18:13:07.902301 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.004225 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fml5b\" (UniqueName: \"kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.004288 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.004304 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.004924 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.005481 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.176173 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fml5b\" (UniqueName: \"kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b\") pod \"redhat-operators-gvpcc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.389648 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:08 crc kubenswrapper[4840]: I1209 18:13:08.889569 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"]
Dec 09 18:13:08 crc kubenswrapper[4840]: W1209 18:13:08.903163 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0622661e_ca65_4967_b4b9_1c8441def2bc.slice/crio-46e32752cc9180cf0262d774f7c5e2a0a2763db4d0fc98c582fb56efd8680a56 WatchSource:0}: Error finding container 46e32752cc9180cf0262d774f7c5e2a0a2763db4d0fc98c582fb56efd8680a56: Status 404 returned error can't find the container with id 46e32752cc9180cf0262d774f7c5e2a0a2763db4d0fc98c582fb56efd8680a56
Dec 09 18:13:09 crc kubenswrapper[4840]: I1209 18:13:09.392404 4840 generic.go:334] "Generic (PLEG): container finished" podID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerID="2a3e8fefc9ccaa67d625f28d69d58eb8d332decede6a83158c83f10107853700" exitCode=0
Dec 09 18:13:09 crc kubenswrapper[4840]: I1209 18:13:09.392459 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerDied","Data":"2a3e8fefc9ccaa67d625f28d69d58eb8d332decede6a83158c83f10107853700"}
Dec 09 18:13:09 crc kubenswrapper[4840]: I1209 18:13:09.392687 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerStarted","Data":"46e32752cc9180cf0262d774f7c5e2a0a2763db4d0fc98c582fb56efd8680a56"}
Dec 09 18:13:09 crc kubenswrapper[4840]: I1209 18:13:09.399561 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 18:13:11 crc kubenswrapper[4840]: I1209 18:13:11.418635 4840 generic.go:334] "Generic (PLEG): container finished" podID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerID="1ed9edc51a589816e6a77fa9cf8b76bc87155e63c92cd2eaa90a30e963cdbecb" exitCode=0
Dec 09 18:13:11 crc kubenswrapper[4840]: I1209 18:13:11.418733 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerDied","Data":"1ed9edc51a589816e6a77fa9cf8b76bc87155e63c92cd2eaa90a30e963cdbecb"}
Dec 09 18:13:11 crc kubenswrapper[4840]: E1209 18:13:11.610453 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:13:14 crc kubenswrapper[4840]: I1209 18:13:14.467759 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerStarted","Data":"37f3412e01b240d1731c8197856ba5038dac03dc5b49a95eddfd179a534081b1"}
Dec 09 18:13:14 crc kubenswrapper[4840]: I1209 18:13:14.505273 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gvpcc" podStartSLOduration=3.9967785239999998 podStartE2EDuration="7.505251853s" podCreationTimestamp="2025-12-09 18:13:07 +0000 UTC" firstStartedPulling="2025-12-09 18:13:09.399340841 +0000 UTC m=+4575.390451474" lastFinishedPulling="2025-12-09 18:13:12.90781417 +0000 UTC m=+4578.898924803" observedRunningTime="2025-12-09 18:13:14.492458596 +0000 UTC m=+4580.483569249" watchObservedRunningTime="2025-12-09 18:13:14.505251853 +0000 UTC m=+4580.496362496"
Dec 09 18:13:18 crc kubenswrapper[4840]: I1209 18:13:18.390612 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:18 crc kubenswrapper[4840]: I1209 18:13:18.393151 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gvpcc"
Dec 09 18:13:19 crc kubenswrapper[4840]: I1209 18:13:19.627313 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gvpcc" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="registry-server" probeResult="failure" output=<
Dec 09 18:13:19 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 18:13:19 crc kubenswrapper[4840]: >
Dec 09 18:13:20 crc kubenswrapper[4840]: E1209 18:13:20.714384 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 18:13:20 crc kubenswrapper[4840]: E1209 18:13:20.714715 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
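
The "Observed pod startup duration" entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (18:13:14.505251853 - 18:13:07 = 7.505251853s), and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling - firstStartedPulling = 3.508473329s), giving 3.996778524s, i.e. the startup SLI is computed net of pull time. A small check of the arithmetic, with the timestamps transcribed from the entry:

    // sloduration.go - reproduces the podStartSLOduration arithmetic from the
    // pod_startup_latency_tracker entry above. The "E2E minus pull window"
    // relationship is inferred from these numbers, not from kubelet source.
    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        t, err := time.Parse(time.RFC3339Nano, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2025-12-09T18:13:07Z")
        firstPull := mustParse("2025-12-09T18:13:09.399340841Z")
        lastPull := mustParse("2025-12-09T18:13:12.907814170Z")
        running := mustParse("2025-12-09T18:13:14.505251853Z")

        e2e := running.Sub(created)        // 7.505251853s, matches podStartE2EDuration
        pulling := lastPull.Sub(firstPull) // 3.508473329s spent pulling images
        fmt.Println("SLO duration:", e2e-pulling) // 3.996778524s, matches podStartSLOduration
    }
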
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:13:20 crc kubenswrapper[4840]: E1209 18:13:20.714838 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:13:20 crc kubenswrapper[4840]: E1209 18:13:20.716201 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:13:22 crc kubenswrapper[4840]: E1209 18:13:22.612357 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:13:28 crc kubenswrapper[4840]: I1209 18:13:28.461072 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gvpcc" Dec 09 18:13:28 crc kubenswrapper[4840]: I1209 18:13:28.532957 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gvpcc" Dec 09 18:13:28 crc kubenswrapper[4840]: I1209 18:13:28.707332 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"] Dec 09 18:13:29 crc kubenswrapper[4840]: I1209 18:13:29.640707 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gvpcc" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="registry-server" containerID="cri-o://37f3412e01b240d1731c8197856ba5038dac03dc5b49a95eddfd179a534081b1" gracePeriod=2 Dec 09 18:13:30 crc kubenswrapper[4840]: I1209 18:13:30.674817 4840 generic.go:334] "Generic (PLEG): container finished" podID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerID="37f3412e01b240d1731c8197856ba5038dac03dc5b49a95eddfd179a534081b1" exitCode=0 Dec 09 18:13:30 crc kubenswrapper[4840]: I1209 18:13:30.674907 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerDied","Data":"37f3412e01b240d1731c8197856ba5038dac03dc5b49a95eddfd179a534081b1"} Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.023057 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gvpcc" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.165390 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fml5b\" (UniqueName: \"kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b\") pod \"0622661e-ca65-4967-b4b9-1c8441def2bc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.165456 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content\") pod \"0622661e-ca65-4967-b4b9-1c8441def2bc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.165695 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities\") pod \"0622661e-ca65-4967-b4b9-1c8441def2bc\" (UID: \"0622661e-ca65-4967-b4b9-1c8441def2bc\") " Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.167952 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities" (OuterVolumeSpecName: "utilities") pod "0622661e-ca65-4967-b4b9-1c8441def2bc" (UID: "0622661e-ca65-4967-b4b9-1c8441def2bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.185791 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b" (OuterVolumeSpecName: "kube-api-access-fml5b") pod "0622661e-ca65-4967-b4b9-1c8441def2bc" (UID: "0622661e-ca65-4967-b4b9-1c8441def2bc"). InnerVolumeSpecName "kube-api-access-fml5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.268588 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.268631 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fml5b\" (UniqueName: \"kubernetes.io/projected/0622661e-ca65-4967-b4b9-1c8441def2bc-kube-api-access-fml5b\") on node \"crc\" DevicePath \"\"" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.285653 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0622661e-ca65-4967-b4b9-1c8441def2bc" (UID: "0622661e-ca65-4967-b4b9-1c8441def2bc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.371017 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0622661e-ca65-4967-b4b9-1c8441def2bc-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.688454 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvpcc" event={"ID":"0622661e-ca65-4967-b4b9-1c8441def2bc","Type":"ContainerDied","Data":"46e32752cc9180cf0262d774f7c5e2a0a2763db4d0fc98c582fb56efd8680a56"} Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.688506 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gvpcc" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.688534 4840 scope.go:117] "RemoveContainer" containerID="37f3412e01b240d1731c8197856ba5038dac03dc5b49a95eddfd179a534081b1" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.713437 4840 scope.go:117] "RemoveContainer" containerID="1ed9edc51a589816e6a77fa9cf8b76bc87155e63c92cd2eaa90a30e963cdbecb" Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.730741 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"] Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.741754 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gvpcc"] Dec 09 18:13:31 crc kubenswrapper[4840]: I1209 18:13:31.763082 4840 scope.go:117] "RemoveContainer" containerID="2a3e8fefc9ccaa67d625f28d69d58eb8d332decede6a83158c83f10107853700" Dec 09 18:13:32 crc kubenswrapper[4840]: E1209 18:13:32.610256 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:13:32 crc kubenswrapper[4840]: I1209 18:13:32.624291 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" path="/var/lib/kubelet/pods/0622661e-ca65-4967-b4b9-1c8441def2bc/volumes" Dec 09 18:13:36 crc kubenswrapper[4840]: E1209 18:13:36.611187 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:13:43 crc kubenswrapper[4840]: E1209 18:13:43.611362 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:13:51 crc kubenswrapper[4840]: E1209 18:13:51.613936 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:13:57 crc kubenswrapper[4840]: E1209 18:13:57.609946 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:14:03 crc kubenswrapper[4840]: E1209 18:14:03.714255 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:14:03 crc kubenswrapper[4840]: E1209 18:14:03.714911 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:14:03 crc kubenswrapper[4840]: E1209 18:14:03.715160 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:14:03 crc kubenswrapper[4840]: E1209 18:14:03.717256 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:14:08 crc kubenswrapper[4840]: E1209 18:14:08.625215 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:14:17 crc kubenswrapper[4840]: E1209 18:14:17.612269 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:14:23 crc kubenswrapper[4840]: E1209 18:14:23.610592 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:14:29 crc kubenswrapper[4840]: E1209 18:14:29.611369 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:14:35 crc kubenswrapper[4840]: E1209 18:14:35.611099 4840 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:14:40 crc kubenswrapper[4840]: E1209 18:14:40.612373 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:14:47 crc kubenswrapper[4840]: E1209 18:14:47.611343 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:14:52 crc kubenswrapper[4840]: E1209 18:14:52.611543 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.214064 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm"] Dec 09 18:15:00 crc kubenswrapper[4840]: E1209 18:15:00.217298 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="registry-server" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.217423 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="registry-server" Dec 09 18:15:00 crc kubenswrapper[4840]: E1209 18:15:00.217549 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="extract-utilities" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.217629 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="extract-utilities" Dec 09 18:15:00 crc kubenswrapper[4840]: E1209 18:15:00.217716 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="extract-content" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.217791 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="extract-content" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.218108 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="0622661e-ca65-4967-b4b9-1c8441def2bc" containerName="registry-server" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.218977 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.221203 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.221239 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.229923 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm"] Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.344711 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.344765 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jppd\" (UniqueName: \"kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.344825 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.451182 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.451246 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jppd\" (UniqueName: \"kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.451320 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.452448 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume\") pod 
\"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.685859 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.687266 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jppd\" (UniqueName: \"kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd\") pod \"collect-profiles-29421735-mdcnm\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:00 crc kubenswrapper[4840]: I1209 18:15:00.850057 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:01 crc kubenswrapper[4840]: I1209 18:15:01.307887 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm"] Dec 09 18:15:01 crc kubenswrapper[4840]: I1209 18:15:01.785720 4840 generic.go:334] "Generic (PLEG): container finished" podID="e3e42433-9089-4826-8f19-ca044f898eca" containerID="400209b6aa9381d38c412f4a127d786bf6ca0dfc636930bbd79ffa3c6f1e44a3" exitCode=0 Dec 09 18:15:01 crc kubenswrapper[4840]: I1209 18:15:01.786004 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" event={"ID":"e3e42433-9089-4826-8f19-ca044f898eca","Type":"ContainerDied","Data":"400209b6aa9381d38c412f4a127d786bf6ca0dfc636930bbd79ffa3c6f1e44a3"} Dec 09 18:15:01 crc kubenswrapper[4840]: I1209 18:15:01.786036 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" event={"ID":"e3e42433-9089-4826-8f19-ca044f898eca","Type":"ContainerStarted","Data":"542f6327036ff0cddd97b3f1d791f1c055019ae56398f4bf178b622b113aa26c"} Dec 09 18:15:02 crc kubenswrapper[4840]: E1209 18:15:02.611055 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.231619 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.319532 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume\") pod \"e3e42433-9089-4826-8f19-ca044f898eca\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.319587 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jppd\" (UniqueName: \"kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd\") pod \"e3e42433-9089-4826-8f19-ca044f898eca\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.319712 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume\") pod \"e3e42433-9089-4826-8f19-ca044f898eca\" (UID: \"e3e42433-9089-4826-8f19-ca044f898eca\") " Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.321008 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume" (OuterVolumeSpecName: "config-volume") pod "e3e42433-9089-4826-8f19-ca044f898eca" (UID: "e3e42433-9089-4826-8f19-ca044f898eca"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.326196 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd" (OuterVolumeSpecName: "kube-api-access-8jppd") pod "e3e42433-9089-4826-8f19-ca044f898eca" (UID: "e3e42433-9089-4826-8f19-ca044f898eca"). InnerVolumeSpecName "kube-api-access-8jppd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.335272 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e3e42433-9089-4826-8f19-ca044f898eca" (UID: "e3e42433-9089-4826-8f19-ca044f898eca"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.421778 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3e42433-9089-4826-8f19-ca044f898eca-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.421809 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jppd\" (UniqueName: \"kubernetes.io/projected/e3e42433-9089-4826-8f19-ca044f898eca-kube-api-access-8jppd\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.421820 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3e42433-9089-4826-8f19-ca044f898eca-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.808190 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" event={"ID":"e3e42433-9089-4826-8f19-ca044f898eca","Type":"ContainerDied","Data":"542f6327036ff0cddd97b3f1d791f1c055019ae56398f4bf178b622b113aa26c"} Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.808222 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421735-mdcnm" Dec 09 18:15:03 crc kubenswrapper[4840]: I1209 18:15:03.808237 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="542f6327036ff0cddd97b3f1d791f1c055019ae56398f4bf178b622b113aa26c" Dec 09 18:15:04 crc kubenswrapper[4840]: I1209 18:15:04.336249 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b"] Dec 09 18:15:04 crc kubenswrapper[4840]: I1209 18:15:04.348893 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421690-cq86b"] Dec 09 18:15:04 crc kubenswrapper[4840]: I1209 18:15:04.621089 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8c1ca2b-b533-41a6-a909-c6132352e702" path="/var/lib/kubelet/pods/e8c1ca2b-b533-41a6-a909-c6132352e702/volumes" Dec 09 18:15:05 crc kubenswrapper[4840]: E1209 18:15:05.610806 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:15:13 crc kubenswrapper[4840]: I1209 18:15:13.316370 4840 scope.go:117] "RemoveContainer" containerID="a951986796ad1f061b96696236ab0e8d0154898a786c55470f7cbe6b363bf1ca" Dec 09 18:15:17 crc kubenswrapper[4840]: E1209 18:15:17.612163 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:15:18 crc kubenswrapper[4840]: E1209 18:15:18.610795 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.201678 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:24 crc kubenswrapper[4840]: E1209 18:15:24.203245 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e42433-9089-4826-8f19-ca044f898eca" containerName="collect-profiles" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.203282 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e42433-9089-4826-8f19-ca044f898eca" containerName="collect-profiles" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.204104 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e42433-9089-4826-8f19-ca044f898eca" containerName="collect-profiles" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.212248 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.227090 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.330015 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.330093 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.330148 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr4kj\" (UniqueName: \"kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.431796 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.432205 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr4kj\" (UniqueName: \"kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.432451 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.432811 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.433199 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.461145 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr4kj\" (UniqueName: \"kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj\") pod \"certified-operators-vx2qz\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:24 crc kubenswrapper[4840]: I1209 18:15:24.548071 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:25 crc kubenswrapper[4840]: I1209 18:15:25.064264 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:25 crc kubenswrapper[4840]: W1209 18:15:25.384253 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1311e95b_6330_4d14_aa53_e8432d0a48d9.slice/crio-b279333f09edbfd2a4d13ab843570834b5274537f8dab87ddf2a361b8d40e6c6 WatchSource:0}: Error finding container b279333f09edbfd2a4d13ab843570834b5274537f8dab87ddf2a361b8d40e6c6: Status 404 returned error can't find the container with id b279333f09edbfd2a4d13ab843570834b5274537f8dab87ddf2a361b8d40e6c6 Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.076863 4840 generic.go:334] "Generic (PLEG): container finished" podID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerID="0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb" exitCode=0 Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.076921 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerDied","Data":"0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb"} Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.077111 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerStarted","Data":"b279333f09edbfd2a4d13ab843570834b5274537f8dab87ddf2a361b8d40e6c6"} Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.375992 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.378175 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.389068 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.474676 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcxkr\" (UniqueName: \"kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.474766 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.474803 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.577450 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.577853 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxkr\" (UniqueName: \"kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.577913 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.578149 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.578478 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.597556 4840 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qcxkr\" (UniqueName: \"kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr\") pod \"redhat-marketplace-6nc6q\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:26 crc kubenswrapper[4840]: I1209 18:15:26.693511 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:27 crc kubenswrapper[4840]: I1209 18:15:27.303631 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:28 crc kubenswrapper[4840]: I1209 18:15:28.096141 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerStarted","Data":"77f340cea6824daef089a37b05cdc8fa1143757e76950ca0778093aa810fc528"} Dec 09 18:15:29 crc kubenswrapper[4840]: I1209 18:15:29.112426 4840 generic.go:334] "Generic (PLEG): container finished" podID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerID="fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4" exitCode=0 Dec 09 18:15:29 crc kubenswrapper[4840]: I1209 18:15:29.112522 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerDied","Data":"fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4"} Dec 09 18:15:29 crc kubenswrapper[4840]: I1209 18:15:29.116416 4840 generic.go:334] "Generic (PLEG): container finished" podID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerID="7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5" exitCode=0 Dec 09 18:15:29 crc kubenswrapper[4840]: I1209 18:15:29.116461 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerDied","Data":"7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5"} Dec 09 18:15:30 crc kubenswrapper[4840]: I1209 18:15:30.131775 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerStarted","Data":"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f"} Dec 09 18:15:30 crc kubenswrapper[4840]: I1209 18:15:30.135260 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerStarted","Data":"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96"} Dec 09 18:15:30 crc kubenswrapper[4840]: I1209 18:15:30.154279 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vx2qz" podStartSLOduration=2.405353914 podStartE2EDuration="6.154259783s" podCreationTimestamp="2025-12-09 18:15:24 +0000 UTC" firstStartedPulling="2025-12-09 18:15:26.078976694 +0000 UTC m=+4712.070087327" lastFinishedPulling="2025-12-09 18:15:29.827882553 +0000 UTC m=+4715.818993196" observedRunningTime="2025-12-09 18:15:30.15310013 +0000 UTC m=+4716.144210763" watchObservedRunningTime="2025-12-09 18:15:30.154259783 +0000 UTC m=+4716.145370416" Dec 09 18:15:30 crc kubenswrapper[4840]: E1209 18:15:30.609782 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:15:31 crc kubenswrapper[4840]: I1209 18:15:31.145591 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerDied","Data":"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96"} Dec 09 18:15:31 crc kubenswrapper[4840]: I1209 18:15:31.145543 4840 generic.go:334] "Generic (PLEG): container finished" podID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerID="cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96" exitCode=0 Dec 09 18:15:32 crc kubenswrapper[4840]: I1209 18:15:32.159762 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerStarted","Data":"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174"} Dec 09 18:15:32 crc kubenswrapper[4840]: I1209 18:15:32.190849 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6nc6q" podStartSLOduration=3.390867022 podStartE2EDuration="6.190829167s" podCreationTimestamp="2025-12-09 18:15:26 +0000 UTC" firstStartedPulling="2025-12-09 18:15:29.118135153 +0000 UTC m=+4715.109245826" lastFinishedPulling="2025-12-09 18:15:31.918097328 +0000 UTC m=+4717.909207971" observedRunningTime="2025-12-09 18:15:32.184584408 +0000 UTC m=+4718.175695071" watchObservedRunningTime="2025-12-09 18:15:32.190829167 +0000 UTC m=+4718.181939810" Dec 09 18:15:32 crc kubenswrapper[4840]: E1209 18:15:32.611378 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:15:34 crc kubenswrapper[4840]: I1209 18:15:34.036187 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:15:34 crc kubenswrapper[4840]: I1209 18:15:34.036532 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:15:34 crc kubenswrapper[4840]: I1209 18:15:34.549310 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:34 crc kubenswrapper[4840]: I1209 18:15:34.549408 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:34 crc kubenswrapper[4840]: I1209 18:15:34.629693 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:35 
crc kubenswrapper[4840]: I1209 18:15:35.239903 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:36 crc kubenswrapper[4840]: I1209 18:15:36.694162 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:36 crc kubenswrapper[4840]: I1209 18:15:36.695529 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:36 crc kubenswrapper[4840]: I1209 18:15:36.779191 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.187799 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.217083 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vx2qz" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="registry-server" containerID="cri-o://b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f" gracePeriod=2 Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.278974 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.722222 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.825200 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities\") pod \"1311e95b-6330-4d14-aa53-e8432d0a48d9\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.825383 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content\") pod \"1311e95b-6330-4d14-aa53-e8432d0a48d9\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.825673 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr4kj\" (UniqueName: \"kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj\") pod \"1311e95b-6330-4d14-aa53-e8432d0a48d9\" (UID: \"1311e95b-6330-4d14-aa53-e8432d0a48d9\") " Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.826453 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities" (OuterVolumeSpecName: "utilities") pod "1311e95b-6330-4d14-aa53-e8432d0a48d9" (UID: "1311e95b-6330-4d14-aa53-e8432d0a48d9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.826912 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.838898 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj" (OuterVolumeSpecName: "kube-api-access-wr4kj") pod "1311e95b-6330-4d14-aa53-e8432d0a48d9" (UID: "1311e95b-6330-4d14-aa53-e8432d0a48d9"). InnerVolumeSpecName "kube-api-access-wr4kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.906879 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1311e95b-6330-4d14-aa53-e8432d0a48d9" (UID: "1311e95b-6330-4d14-aa53-e8432d0a48d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.929470 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1311e95b-6330-4d14-aa53-e8432d0a48d9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:37 crc kubenswrapper[4840]: I1209 18:15:37.929507 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr4kj\" (UniqueName: \"kubernetes.io/projected/1311e95b-6330-4d14-aa53-e8432d0a48d9-kube-api-access-wr4kj\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.240563 4840 generic.go:334] "Generic (PLEG): container finished" podID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerID="b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f" exitCode=0 Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.240668 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vx2qz" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.240775 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerDied","Data":"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f"} Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.240821 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx2qz" event={"ID":"1311e95b-6330-4d14-aa53-e8432d0a48d9","Type":"ContainerDied","Data":"b279333f09edbfd2a4d13ab843570834b5274537f8dab87ddf2a361b8d40e6c6"} Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.240851 4840 scope.go:117] "RemoveContainer" containerID="b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.270157 4840 scope.go:117] "RemoveContainer" containerID="fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.290564 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.306105 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vx2qz"] Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.315926 4840 scope.go:117] "RemoveContainer" containerID="0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.380276 4840 scope.go:117] "RemoveContainer" containerID="b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f" Dec 09 18:15:38 crc kubenswrapper[4840]: E1209 18:15:38.383243 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f\": container with ID starting with b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f not found: ID does not exist" containerID="b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.383299 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f"} err="failed to get container status \"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f\": rpc error: code = NotFound desc = could not find container \"b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f\": container with ID starting with b3483e70f22b85240f0e8bcafc542afbf5c7c396d1f922f3dd4822311cf2b58f not found: ID does not exist" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.383336 4840 scope.go:117] "RemoveContainer" containerID="fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4" Dec 09 18:15:38 crc kubenswrapper[4840]: E1209 18:15:38.384932 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4\": container with ID starting with fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4 not found: ID does not exist" containerID="fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.385047 4840 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4"} err="failed to get container status \"fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4\": rpc error: code = NotFound desc = could not find container \"fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4\": container with ID starting with fa63f61d32bd109113919f05442cec288081cd671415b00be25a476096bdaee4 not found: ID does not exist" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.385132 4840 scope.go:117] "RemoveContainer" containerID="0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb" Dec 09 18:15:38 crc kubenswrapper[4840]: E1209 18:15:38.386654 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb\": container with ID starting with 0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb not found: ID does not exist" containerID="0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.386691 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb"} err="failed to get container status \"0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb\": rpc error: code = NotFound desc = could not find container \"0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb\": container with ID starting with 0ebc5b3111289026eb38a10983188bd65f18a42886aced4c53b888c31b6278bb not found: ID does not exist" Dec 09 18:15:38 crc kubenswrapper[4840]: I1209 18:15:38.623238 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" path="/var/lib/kubelet/pods/1311e95b-6330-4d14-aa53-e8432d0a48d9/volumes" Dec 09 18:15:39 crc kubenswrapper[4840]: I1209 18:15:39.571930 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.263166 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6nc6q" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="registry-server" containerID="cri-o://e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174" gracePeriod=2 Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.805054 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.898046 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities\") pod \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.898141 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcxkr\" (UniqueName: \"kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr\") pod \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.898241 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content\") pod \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\" (UID: \"f18f05b0-0f8b-46eb-b617-49262fcd29d4\") " Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.898738 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities" (OuterVolumeSpecName: "utilities") pod "f18f05b0-0f8b-46eb-b617-49262fcd29d4" (UID: "f18f05b0-0f8b-46eb-b617-49262fcd29d4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.904049 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr" (OuterVolumeSpecName: "kube-api-access-qcxkr") pod "f18f05b0-0f8b-46eb-b617-49262fcd29d4" (UID: "f18f05b0-0f8b-46eb-b617-49262fcd29d4"). InnerVolumeSpecName "kube-api-access-qcxkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:15:40 crc kubenswrapper[4840]: I1209 18:15:40.927612 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f18f05b0-0f8b-46eb-b617-49262fcd29d4" (UID: "f18f05b0-0f8b-46eb-b617-49262fcd29d4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.000953 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.001161 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18f05b0-0f8b-46eb-b617-49262fcd29d4-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.001263 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcxkr\" (UniqueName: \"kubernetes.io/projected/f18f05b0-0f8b-46eb-b617-49262fcd29d4-kube-api-access-qcxkr\") on node \"crc\" DevicePath \"\"" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.275960 4840 generic.go:334] "Generic (PLEG): container finished" podID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerID="e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174" exitCode=0 Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.276053 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nc6q" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.276066 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerDied","Data":"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174"} Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.276146 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nc6q" event={"ID":"f18f05b0-0f8b-46eb-b617-49262fcd29d4","Type":"ContainerDied","Data":"77f340cea6824daef089a37b05cdc8fa1143757e76950ca0778093aa810fc528"} Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.276179 4840 scope.go:117] "RemoveContainer" containerID="e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.322737 4840 scope.go:117] "RemoveContainer" containerID="cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.327915 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.339639 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nc6q"] Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.358099 4840 scope.go:117] "RemoveContainer" containerID="7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.410919 4840 scope.go:117] "RemoveContainer" containerID="e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174" Dec 09 18:15:41 crc kubenswrapper[4840]: E1209 18:15:41.411606 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174\": container with ID starting with e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174 not found: ID does not exist" containerID="e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.411666 4840 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174"} err="failed to get container status \"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174\": rpc error: code = NotFound desc = could not find container \"e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174\": container with ID starting with e460fcbdd0673449b9b72b78fbc763918a17e19d13e64cc5cd357cdbf2979174 not found: ID does not exist" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.411708 4840 scope.go:117] "RemoveContainer" containerID="cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96" Dec 09 18:15:41 crc kubenswrapper[4840]: E1209 18:15:41.412252 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96\": container with ID starting with cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96 not found: ID does not exist" containerID="cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.412436 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96"} err="failed to get container status \"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96\": rpc error: code = NotFound desc = could not find container \"cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96\": container with ID starting with cf983a165c6d365f5edc679e2521bd4eda687956c113aaef94119ab0fe6cdb96 not found: ID does not exist" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.412619 4840 scope.go:117] "RemoveContainer" containerID="7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5" Dec 09 18:15:41 crc kubenswrapper[4840]: E1209 18:15:41.413038 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5\": container with ID starting with 7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5 not found: ID does not exist" containerID="7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5" Dec 09 18:15:41 crc kubenswrapper[4840]: I1209 18:15:41.413068 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5"} err="failed to get container status \"7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5\": rpc error: code = NotFound desc = could not find container \"7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5\": container with ID starting with 7d420811c6babce96e9481b16a545233c50ac82f8167e760512567f0ad62b0b5 not found: ID does not exist" Dec 09 18:15:41 crc kubenswrapper[4840]: E1209 18:15:41.476815 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf18f05b0_0f8b_46eb_b617_49262fcd29d4.slice/crio-77f340cea6824daef089a37b05cdc8fa1143757e76950ca0778093aa810fc528\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf18f05b0_0f8b_46eb_b617_49262fcd29d4.slice\": RecentStats: unable to find data in 
memory cache]" Dec 09 18:15:41 crc kubenswrapper[4840]: E1209 18:15:41.610824 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:15:42 crc kubenswrapper[4840]: I1209 18:15:42.626394 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" path="/var/lib/kubelet/pods/f18f05b0-0f8b-46eb-b617-49262fcd29d4/volumes" Dec 09 18:15:44 crc kubenswrapper[4840]: E1209 18:15:44.620266 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:15:54 crc kubenswrapper[4840]: E1209 18:15:54.628365 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:15:55 crc kubenswrapper[4840]: E1209 18:15:55.610257 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:16:04 crc kubenswrapper[4840]: I1209 18:16:04.036885 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:16:04 crc kubenswrapper[4840]: I1209 18:16:04.037874 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.042535 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr"] Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.043672 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.043754 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.043791 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="extract-utilities" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.043809 4840 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="extract-utilities" Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.043864 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="extract-utilities" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.043884 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="extract-utilities" Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.043939 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="extract-content" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.043958 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="extract-content" Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.044022 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.044041 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: E1209 18:16:05.044096 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="extract-content" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.044116 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="extract-content" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.044495 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="f18f05b0-0f8b-46eb-b617-49262fcd29d4" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.044533 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="1311e95b-6330-4d14-aa53-e8432d0a48d9" containerName="registry-server" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.046061 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.049296 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.050790 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.051251 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.052310 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qrgfg" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.076478 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr"] Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.161746 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.161868 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9kc8\" (UniqueName: \"kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.161914 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.264830 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.265076 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9kc8\" (UniqueName: \"kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.265150 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.276044 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.276097 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.294789 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9kc8\" (UniqueName: \"kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.378485 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" Dec 09 18:16:05 crc kubenswrapper[4840]: I1209 18:16:05.973091 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr"] Dec 09 18:16:06 crc kubenswrapper[4840]: I1209 18:16:06.585695 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" event={"ID":"20290271-4f20-4407-b1a4-063880514c1e","Type":"ContainerStarted","Data":"093dba91c02a4a65d71b726ad14da91b58887159a6c50b45aa08e5e4204af6ae"} Dec 09 18:16:06 crc kubenswrapper[4840]: E1209 18:16:06.610532 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:16:07 crc kubenswrapper[4840]: I1209 18:16:07.594637 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" event={"ID":"20290271-4f20-4407-b1a4-063880514c1e","Type":"ContainerStarted","Data":"ba9c1ed8e57eee14cb0de563e96ecae46c6e3010a14c4e66017fd1819420e7b9"} Dec 09 18:16:07 crc kubenswrapper[4840]: E1209 18:16:07.610315 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:16:07 crc kubenswrapper[4840]: I1209 18:16:07.621042 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" podStartSLOduration=1.752695887 podStartE2EDuration="2.621023854s" podCreationTimestamp="2025-12-09 18:16:05 +0000 UTC" firstStartedPulling="2025-12-09 18:16:05.980312708 +0000 UTC m=+4751.971423351" lastFinishedPulling="2025-12-09 18:16:06.848640685 +0000 UTC m=+4752.839751318" observedRunningTime="2025-12-09 18:16:07.616411091 +0000 UTC m=+4753.607521754" watchObservedRunningTime="2025-12-09 18:16:07.621023854 +0000 UTC m=+4753.612134477" Dec 09 18:16:20 crc kubenswrapper[4840]: E1209 18:16:20.611513 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:16:20 crc kubenswrapper[4840]: E1209 18:16:20.612687 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.036412 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.036863 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.036910 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.037672 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.037718 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5" gracePeriod=600 Dec 09 18:16:34 crc kubenswrapper[4840]: E1209 18:16:34.637801 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:16:34 crc 
kubenswrapper[4840]: I1209 18:16:34.889649 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5" exitCode=0 Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.889748 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5"} Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.890036 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"} Dec 09 18:16:34 crc kubenswrapper[4840]: I1209 18:16:34.890060 4840 scope.go:117] "RemoveContainer" containerID="f4431c0c49dc8d65cca8d1bcbf5495ab369b1f0570542f5706fb0b3545824141" Dec 09 18:16:35 crc kubenswrapper[4840]: E1209 18:16:35.612223 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:16:47 crc kubenswrapper[4840]: E1209 18:16:47.611117 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:16:50 crc kubenswrapper[4840]: E1209 18:16:50.612538 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.163901 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kn5mq"] Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.169205 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.181456 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kn5mq"] Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.334447 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px8ql\" (UniqueName: \"kubernetes.io/projected/8be887e1-067d-4f90-ba57-c335c0cc3346-kube-api-access-px8ql\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.334498 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-catalog-content\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.334681 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-utilities\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.436391 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px8ql\" (UniqueName: \"kubernetes.io/projected/8be887e1-067d-4f90-ba57-c335c0cc3346-kube-api-access-px8ql\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.436438 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-catalog-content\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.436520 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-utilities\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.437089 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-catalog-content\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.437160 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8be887e1-067d-4f90-ba57-c335c0cc3346-utilities\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.460956 4840 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-px8ql\" (UniqueName: \"kubernetes.io/projected/8be887e1-067d-4f90-ba57-c335c0cc3346-kube-api-access-px8ql\") pod \"community-operators-kn5mq\" (UID: \"8be887e1-067d-4f90-ba57-c335c0cc3346\") " pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:51 crc kubenswrapper[4840]: I1209 18:16:51.490332 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:16:52 crc kubenswrapper[4840]: I1209 18:16:52.054588 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kn5mq"] Dec 09 18:16:52 crc kubenswrapper[4840]: I1209 18:16:52.091636 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn5mq" event={"ID":"8be887e1-067d-4f90-ba57-c335c0cc3346","Type":"ContainerStarted","Data":"dacba5d28e23f12022c5da180a3b165b0e88f73fd2c90ff9b8f08077f18f8235"} Dec 09 18:16:53 crc kubenswrapper[4840]: I1209 18:16:53.106033 4840 generic.go:334] "Generic (PLEG): container finished" podID="8be887e1-067d-4f90-ba57-c335c0cc3346" containerID="90639a51fa19253857c76011f96826c0056149079477cae00312bf0b07b1b53c" exitCode=0 Dec 09 18:16:53 crc kubenswrapper[4840]: I1209 18:16:53.106123 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn5mq" event={"ID":"8be887e1-067d-4f90-ba57-c335c0cc3346","Type":"ContainerDied","Data":"90639a51fa19253857c76011f96826c0056149079477cae00312bf0b07b1b53c"} Dec 09 18:16:58 crc kubenswrapper[4840]: I1209 18:16:58.154668 4840 generic.go:334] "Generic (PLEG): container finished" podID="8be887e1-067d-4f90-ba57-c335c0cc3346" containerID="185a4e0a853e63416247e8e849bcb12c4667859606dc004c258b1ea615f26aa2" exitCode=0 Dec 09 18:16:58 crc kubenswrapper[4840]: I1209 18:16:58.154764 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn5mq" event={"ID":"8be887e1-067d-4f90-ba57-c335c0cc3346","Type":"ContainerDied","Data":"185a4e0a853e63416247e8e849bcb12c4667859606dc004c258b1ea615f26aa2"} Dec 09 18:16:58 crc kubenswrapper[4840]: E1209 18:16:58.611875 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:17:01 crc kubenswrapper[4840]: I1209 18:17:01.183590 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn5mq" event={"ID":"8be887e1-067d-4f90-ba57-c335c0cc3346","Type":"ContainerStarted","Data":"d692e423b2b9bc80c7637be235e895dda94376f0c405e3560759b17fd3cfbad1"} Dec 09 18:17:01 crc kubenswrapper[4840]: I1209 18:17:01.203368 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kn5mq" podStartSLOduration=2.985581019 podStartE2EDuration="10.203349088s" podCreationTimestamp="2025-12-09 18:16:51 +0000 UTC" firstStartedPulling="2025-12-09 18:16:53.108557973 +0000 UTC m=+4799.099668616" lastFinishedPulling="2025-12-09 18:17:00.326326052 +0000 UTC m=+4806.317436685" observedRunningTime="2025-12-09 18:17:01.201730522 +0000 UTC m=+4807.192841175" watchObservedRunningTime="2025-12-09 18:17:01.203349088 +0000 UTC m=+4807.194459741" Dec 09 18:17:01 crc 
kubenswrapper[4840]: I1209 18:17:01.491340 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:17:01 crc kubenswrapper[4840]: I1209 18:17:01.491639 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:17:02 crc kubenswrapper[4840]: I1209 18:17:02.544932 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-kn5mq" podUID="8be887e1-067d-4f90-ba57-c335c0cc3346" containerName="registry-server" probeResult="failure" output=< Dec 09 18:17:02 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 18:17:02 crc kubenswrapper[4840]: > Dec 09 18:17:02 crc kubenswrapper[4840]: E1209 18:17:02.609920 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:17:11 crc kubenswrapper[4840]: I1209 18:17:11.569882 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:17:11 crc kubenswrapper[4840]: I1209 18:17:11.643846 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kn5mq" Dec 09 18:17:11 crc kubenswrapper[4840]: I1209 18:17:11.713327 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kn5mq"] Dec 09 18:17:11 crc kubenswrapper[4840]: I1209 18:17:11.816735 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 18:17:11 crc kubenswrapper[4840]: I1209 18:17:11.817133 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l9nvq" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="registry-server" containerID="cri-o://4deaee9ca5268ef4d0cb4ebc3b7d69150edcc29cbbe8bc30b154566d1c24b5de" gracePeriod=2 Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.317338 4840 generic.go:334] "Generic (PLEG): container finished" podID="263ae667-376a-4b0b-8509-0342fddb0392" containerID="4deaee9ca5268ef4d0cb4ebc3b7d69150edcc29cbbe8bc30b154566d1c24b5de" exitCode=0 Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.318164 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerDied","Data":"4deaee9ca5268ef4d0cb4ebc3b7d69150edcc29cbbe8bc30b154566d1c24b5de"} Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.318222 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9nvq" event={"ID":"263ae667-376a-4b0b-8509-0342fddb0392","Type":"ContainerDied","Data":"b157a6fa25402082b66803486227dcb301eafbbed375eff9571f92458c971774"} Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.318238 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b157a6fa25402082b66803486227dcb301eafbbed375eff9571f92458c971774" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.394642 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.551790 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content\") pod \"263ae667-376a-4b0b-8509-0342fddb0392\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.552036 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v896\" (UniqueName: \"kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896\") pod \"263ae667-376a-4b0b-8509-0342fddb0392\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.552074 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities\") pod \"263ae667-376a-4b0b-8509-0342fddb0392\" (UID: \"263ae667-376a-4b0b-8509-0342fddb0392\") " Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.552573 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities" (OuterVolumeSpecName: "utilities") pod "263ae667-376a-4b0b-8509-0342fddb0392" (UID: "263ae667-376a-4b0b-8509-0342fddb0392"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.552730 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.558954 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896" (OuterVolumeSpecName: "kube-api-access-9v896") pod "263ae667-376a-4b0b-8509-0342fddb0392" (UID: "263ae667-376a-4b0b-8509-0342fddb0392"). InnerVolumeSpecName "kube-api-access-9v896". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.611688 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "263ae667-376a-4b0b-8509-0342fddb0392" (UID: "263ae667-376a-4b0b-8509-0342fddb0392"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.655472 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/263ae667-376a-4b0b-8509-0342fddb0392-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:17:12 crc kubenswrapper[4840]: I1209 18:17:12.655524 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v896\" (UniqueName: \"kubernetes.io/projected/263ae667-376a-4b0b-8509-0342fddb0392-kube-api-access-9v896\") on node \"crc\" DevicePath \"\"" Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.324652 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l9nvq" Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.357158 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.365718 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l9nvq"] Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.436556 4840 scope.go:117] "RemoveContainer" containerID="05ad94fb2bbe77119d88d99910619b3fa29b497ef8dca659ab719772b7ad5330" Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.468465 4840 scope.go:117] "RemoveContainer" containerID="4deaee9ca5268ef4d0cb4ebc3b7d69150edcc29cbbe8bc30b154566d1c24b5de" Dec 09 18:17:13 crc kubenswrapper[4840]: I1209 18:17:13.507379 4840 scope.go:117] "RemoveContainer" containerID="a8da44282bbc1991bbd3042d491e15afa757927b6652bb8e9962018a61e7cd2e" Dec 09 18:17:14 crc kubenswrapper[4840]: E1209 18:17:14.624324 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:17:14 crc kubenswrapper[4840]: I1209 18:17:14.624357 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="263ae667-376a-4b0b-8509-0342fddb0392" path="/var/lib/kubelet/pods/263ae667-376a-4b0b-8509-0342fddb0392/volumes" Dec 09 18:17:15 crc kubenswrapper[4840]: E1209 18:17:15.610481 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:17:27 crc kubenswrapper[4840]: E1209 18:17:27.612021 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:17:30 crc kubenswrapper[4840]: E1209 18:17:30.611802 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:17:42 crc kubenswrapper[4840]: E1209 18:17:42.611891 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:17:45 crc kubenswrapper[4840]: E1209 18:17:45.610405 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:17:54 crc kubenswrapper[4840]: E1209 18:17:54.616229 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:17:59 crc kubenswrapper[4840]: E1209 18:17:59.612264 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:18:06 crc kubenswrapper[4840]: E1209 18:18:06.613164 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:18:13 crc kubenswrapper[4840]: E1209 18:18:13.612546 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:18:18 crc kubenswrapper[4840]: E1209 18:18:18.611196 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:18:27 crc kubenswrapper[4840]: I1209 18:18:27.612477 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:18:27 crc kubenswrapper[4840]: E1209 18:18:27.744748 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:18:27 crc kubenswrapper[4840]: E1209 18:18:27.744826 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:18:27 crc kubenswrapper[4840]: E1209 18:18:27.745004 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:18:27 crc kubenswrapper[4840]: E1209 18:18:27.746241 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:18:29 crc kubenswrapper[4840]: E1209 18:18:29.612049 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:18:34 crc kubenswrapper[4840]: I1209 18:18:34.036495 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:18:34 crc kubenswrapper[4840]: I1209 18:18:34.037118 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:18:41 crc kubenswrapper[4840]: E1209 18:18:41.611809 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:18:42 crc kubenswrapper[4840]: E1209 18:18:42.610956 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:18:55 crc kubenswrapper[4840]: E1209 18:18:55.612817 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:18:56 crc kubenswrapper[4840]: E1209 18:18:56.610372 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:19:04 crc kubenswrapper[4840]: I1209 18:19:04.036086 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:19:04 crc kubenswrapper[4840]: I1209 18:19:04.036748 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:19:09 crc kubenswrapper[4840]: E1209 18:19:09.610248 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:19:10 crc kubenswrapper[4840]: E1209 18:19:10.731844 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:19:10 crc kubenswrapper[4840]: E1209 18:19:10.732230 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:19:10 crc kubenswrapper[4840]: E1209 18:19:10.732370 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 18:19:10 crc kubenswrapper[4840]: E1209 18:19:10.733602 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:19:23 crc kubenswrapper[4840]: E1209 18:19:23.610434 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:19:24 crc kubenswrapper[4840]: E1209 18:19:24.619077 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.036122 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.037069 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
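[Editor's note: the "Unhandled Error" entry above dumps the full serialized corev1.Container for ceilometer-central-agent. For readability, here is a minimal client-go-style sketch of the same container and its exec liveness probe, with field values copied from the dump; the surrounding pod wiring is assumed, since the log does not show it.]

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Exec liveness probe as serialized in the kuberuntime_manager.go dump above:
	// run centralhealth.py every 5s after a 300s initial delay; 3 consecutive
	// failures mark the container unhealthy and trigger a restart.
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				Command: []string{"/usr/bin/python3", "/var/lib/openstack/bin/centralhealth.py"},
			},
		},
		InitialDelaySeconds: 300,
		TimeoutSeconds:      5,
		PeriodSeconds:       5,
		SuccessThreshold:    1,
		FailureThreshold:    3,
	}
	container := corev1.Container{
		Name:            "ceilometer-central-agent",
		Image:           "quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested",
		Command:         []string{"/bin/bash"},
		Args:            []string{"-c", "/usr/local/bin/kolla_start"},
		ImagePullPolicy: corev1.PullAlways, // note: with the tag gone from quay, Always guarantees ErrImagePull
		LivenessProbe:   probe,
	}
	fmt.Println(container.Name, container.Image)
}
```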
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.037165 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.038616 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.038706 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" gracePeriod=600
Dec 09 18:19:34 crc kubenswrapper[4840]: E1209 18:19:34.161568 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.927254 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" exitCode=0
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.927469 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"}
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.927563 4840 scope.go:117] "RemoveContainer" containerID="81851b50dc62877bca83a306e48e2e839fba87763c5a012e7c3b37601d8213d5"
Dec 09 18:19:34 crc kubenswrapper[4840]: I1209 18:19:34.928461 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:19:34 crc kubenswrapper[4840]: E1209 18:19:34.928803 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:19:35 crc kubenswrapper[4840]: E1209 18:19:35.612186 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:19:36 crc kubenswrapper[4840]: E1209 18:19:36.609581 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:19:45 crc kubenswrapper[4840]: I1209 18:19:45.608405 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:19:45 crc kubenswrapper[4840]: E1209 18:19:45.609342 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:19:47 crc kubenswrapper[4840]: E1209 18:19:47.610589 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:19:49 crc kubenswrapper[4840]: E1209 18:19:49.634935 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:20:00 crc kubenswrapper[4840]: I1209 18:20:00.609343 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:20:00 crc kubenswrapper[4840]: E1209 18:20:00.610505 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:20:00 crc kubenswrapper[4840]: E1209 18:20:00.611463 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:20:00 crc kubenswrapper[4840]: E1209 18:20:00.615773 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:20:11 crc kubenswrapper[4840]: E1209 18:20:11.610386 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:20:13 crc kubenswrapper[4840]: I1209 18:20:13.609131 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:20:13 crc kubenswrapper[4840]: E1209 18:20:13.610383 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:20:14 crc kubenswrapper[4840]: E1209 18:20:14.625304 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:20:25 crc kubenswrapper[4840]: E1209 18:20:25.612609 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:20:27 crc kubenswrapper[4840]: I1209 18:20:27.133217 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-7d5bcffc7c-n8wfr" podUID="e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Dec 09 18:20:28 crc kubenswrapper[4840]: I1209 18:20:28.609156 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:20:28 crc kubenswrapper[4840]: E1209 18:20:28.609829 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:20:29 crc kubenswrapper[4840]: E1209 18:20:29.611403 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:20:37 crc kubenswrapper[4840]: E1209 18:20:37.620461 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:20:41 crc kubenswrapper[4840]: E1209 18:20:41.611218 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:20:43 crc kubenswrapper[4840]: I1209 18:20:43.609528 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:20:43 crc kubenswrapper[4840]: E1209 18:20:43.610322 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:20:48 crc kubenswrapper[4840]: E1209 18:20:48.612350 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:20:54 crc kubenswrapper[4840]: I1209 18:20:54.621997 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:20:54 crc kubenswrapper[4840]: E1209 18:20:54.624891 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:20:55 crc kubenswrapper[4840]: E1209 18:20:55.610860 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:21:02 crc kubenswrapper[4840]: E1209 18:21:02.613265 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:21:07 crc kubenswrapper[4840]: E1209 18:21:07.612500 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:21:08 crc kubenswrapper[4840]: I1209 18:21:08.608823 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:21:08 crc kubenswrapper[4840]: E1209 18:21:08.609222 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:21:17 crc kubenswrapper[4840]: E1209 18:21:17.613565 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:21:19 crc kubenswrapper[4840]: E1209 18:21:19.612151 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:21:20 crc kubenswrapper[4840]: I1209 18:21:20.609447 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:21:20 crc kubenswrapper[4840]: E1209 18:21:20.610430 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:21:28 crc kubenswrapper[4840]: E1209 18:21:28.612188 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:21:33 crc kubenswrapper[4840]: E1209 18:21:33.611624 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:21:34 crc kubenswrapper[4840]: I1209 18:21:34.619209 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:21:34 crc kubenswrapper[4840]: E1209 18:21:34.621780 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:21:43 crc kubenswrapper[4840]: E1209 18:21:43.611596 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:21:47 crc kubenswrapper[4840]: I1209 18:21:47.608375 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:21:47 crc kubenswrapper[4840]: E1209 18:21:47.609297 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:21:47 crc kubenswrapper[4840]: E1209 18:21:47.610624 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:21:58 crc kubenswrapper[4840]: E1209 18:21:58.613172 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:22:00 crc kubenswrapper[4840]: E1209 18:22:00.611505 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:22:02 crc kubenswrapper[4840]: I1209 18:22:02.610319 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:22:02 crc kubenswrapper[4840]: E1209 18:22:02.611261 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:22:11 crc kubenswrapper[4840]: E1209 18:22:11.611227 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:22:13 crc kubenswrapper[4840]: I1209 18:22:13.609727 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:22:13 crc kubenswrapper[4840]: E1209 18:22:13.610580 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:22:15 crc kubenswrapper[4840]: E1209 18:22:15.610694 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:22:26 crc kubenswrapper[4840]: E1209 18:22:26.611195 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:22:27 crc kubenswrapper[4840]: I1209 18:22:27.621345 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:22:27 crc kubenswrapper[4840]: E1209 18:22:27.621886 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:22:30 crc kubenswrapper[4840]: E1209 18:22:30.613140 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:22:30 crc kubenswrapper[4840]: I1209 18:22:30.981583 4840 generic.go:334] "Generic (PLEG): container finished" podID="20290271-4f20-4407-b1a4-063880514c1e" containerID="ba9c1ed8e57eee14cb0de563e96ecae46c6e3010a14c4e66017fd1819420e7b9" exitCode=2
Dec 09 18:22:30 crc kubenswrapper[4840]: I1209 18:22:30.981662 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" event={"ID":"20290271-4f20-4407-b1a4-063880514c1e","Type":"ContainerDied","Data":"ba9c1ed8e57eee14cb0de563e96ecae46c6e3010a14c4e66017fd1819420e7b9"}
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.567852 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr"
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.706261 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9kc8\" (UniqueName: \"kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8\") pod \"20290271-4f20-4407-b1a4-063880514c1e\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") "
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.706482 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory\") pod \"20290271-4f20-4407-b1a4-063880514c1e\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") "
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.706530 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key\") pod \"20290271-4f20-4407-b1a4-063880514c1e\" (UID: \"20290271-4f20-4407-b1a4-063880514c1e\") "
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.711855 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8" (OuterVolumeSpecName: "kube-api-access-s9kc8") pod "20290271-4f20-4407-b1a4-063880514c1e" (UID: "20290271-4f20-4407-b1a4-063880514c1e"). InnerVolumeSpecName "kube-api-access-s9kc8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.737578 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "20290271-4f20-4407-b1a4-063880514c1e" (UID: "20290271-4f20-4407-b1a4-063880514c1e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.737883 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory" (OuterVolumeSpecName: "inventory") pod "20290271-4f20-4407-b1a4-063880514c1e" (UID: "20290271-4f20-4407-b1a4-063880514c1e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.809902 4840 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.809938 4840 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/20290271-4f20-4407-b1a4-063880514c1e-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 18:22:32 crc kubenswrapper[4840]: I1209 18:22:32.809948 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9kc8\" (UniqueName: \"kubernetes.io/projected/20290271-4f20-4407-b1a4-063880514c1e-kube-api-access-s9kc8\") on node \"crc\" DevicePath \"\""
Dec 09 18:22:33 crc kubenswrapper[4840]: I1209 18:22:33.000398 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr" event={"ID":"20290271-4f20-4407-b1a4-063880514c1e","Type":"ContainerDied","Data":"093dba91c02a4a65d71b726ad14da91b58887159a6c50b45aa08e5e4204af6ae"}
Dec 09 18:22:33 crc kubenswrapper[4840]: I1209 18:22:33.000444 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="093dba91c02a4a65d71b726ad14da91b58887159a6c50b45aa08e5e4204af6ae"
Dec 09 18:22:33 crc kubenswrapper[4840]: I1209 18:22:33.000524 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr"
Dec 09 18:22:33 crc kubenswrapper[4840]: E1209 18:22:33.278329 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20290271_4f20_4407_b1a4_063880514c1e.slice\": RecentStats: unable to find data in memory cache]"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.621767 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lv92q/must-gather-2pps9"]
Dec 09 18:22:38 crc kubenswrapper[4840]: E1209 18:22:38.622774 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="extract-content"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.622791 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="extract-content"
Dec 09 18:22:38 crc kubenswrapper[4840]: E1209 18:22:38.622828 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="registry-server"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.622835 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="registry-server"
Dec 09 18:22:38 crc kubenswrapper[4840]: E1209 18:22:38.622843 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20290271-4f20-4407-b1a4-063880514c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.622851 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="20290271-4f20-4407-b1a4-063880514c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
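[Editor's note: the RemoveContainer / CrashLoopBackOff pairs repeating through the stretch above show the restart backoff already saturated at its cap ("back-off 5m0s restarting failed container"). A minimal sketch of capped exponential backoff under the commonly cited kubelet defaults (10s initial delay, doubling, 5m cap); treat those constants as assumptions rather than values read from this log.]

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the previous delay and clamps it at maxDelay,
// mirroring how repeated restart failures converge on "back-off 5m0s".
func nextBackoff(prev, initial, maxDelay time.Duration) time.Duration {
	if prev == 0 {
		return initial
	}
	next := prev * 2
	if next > maxDelay {
		return maxDelay
	}
	return next
}

func main() {
	var d time.Duration
	for i := 0; i < 8; i++ {
		d = nextBackoff(d, 10*time.Second, 5*time.Minute)
		fmt.Println(i, d) // 10s 20s 40s 1m20s 2m40s 5m0s 5m0s 5m0s
	}
}
```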
Dec 09 18:22:38 crc kubenswrapper[4840]: E1209 18:22:38.622874 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="extract-utilities"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.622883 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="extract-utilities"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.623170 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="263ae667-376a-4b0b-8509-0342fddb0392" containerName="registry-server"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.623191 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="20290271-4f20-4407-b1a4-063880514c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.624708 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.628719 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-lv92q"/"default-dockercfg-p4djj"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.628891 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lv92q"/"kube-root-ca.crt"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.629065 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lv92q"/"openshift-service-ca.crt"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.632152 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lv92q/must-gather-2pps9"]
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.780396 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnhqx\" (UniqueName: \"kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.780651 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.882516 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.882919 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnhqx\" (UniqueName: \"kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.883071 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.909941 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnhqx\" (UniqueName: \"kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx\") pod \"must-gather-2pps9\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:38 crc kubenswrapper[4840]: I1209 18:22:38.951107 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/must-gather-2pps9"
Dec 09 18:22:39 crc kubenswrapper[4840]: I1209 18:22:39.458305 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lv92q/must-gather-2pps9"]
Dec 09 18:22:40 crc kubenswrapper[4840]: I1209 18:22:40.098139 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/must-gather-2pps9" event={"ID":"fe16bca5-1a60-4cb6-a11c-474f241a53fa","Type":"ContainerStarted","Data":"36a66106ef498dcbf53ce800c02a0c9612076edd0ae6406f3b2f01c4b7042fae"}
Dec 09 18:22:41 crc kubenswrapper[4840]: E1209 18:22:41.611030 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:22:42 crc kubenswrapper[4840]: I1209 18:22:42.608685 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:22:42 crc kubenswrapper[4840]: E1209 18:22:42.608978 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:22:44 crc kubenswrapper[4840]: E1209 18:22:44.628402 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:22:49 crc kubenswrapper[4840]: I1209 18:22:49.200793 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/must-gather-2pps9" event={"ID":"fe16bca5-1a60-4cb6-a11c-474f241a53fa","Type":"ContainerStarted","Data":"1425747da36769af2705b195370a455f13f6b6fef59a00622ca1c28e672d0f59"}
Dec 09 18:22:49 crc kubenswrapper[4840]: I1209 18:22:49.201295 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/must-gather-2pps9" event={"ID":"fe16bca5-1a60-4cb6-a11c-474f241a53fa","Type":"ContainerStarted","Data":"e850b1969e32236f5cf584f245aa47c22e976a16052c9f197e831041aa1fecd3"}
Dec 09 18:22:49 crc kubenswrapper[4840]: I1209 18:22:49.230178 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lv92q/must-gather-2pps9" podStartSLOduration=3.23613683 podStartE2EDuration="11.230163614s" podCreationTimestamp="2025-12-09 18:22:38 +0000 UTC" firstStartedPulling="2025-12-09 18:22:39.457958942 +0000 UTC m=+5145.449069585" lastFinishedPulling="2025-12-09 18:22:47.451985736 +0000 UTC m=+5153.443096369" observedRunningTime="2025-12-09 18:22:49.227219582 +0000 UTC m=+5155.218330215" watchObservedRunningTime="2025-12-09 18:22:49.230163614 +0000 UTC m=+5155.221274247"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.187041 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lv92q/crc-debug-s4z7c"]
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.188684 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.257632 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.258088 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frrz6\" (UniqueName: \"kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.360669 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.360812 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frrz6\" (UniqueName: \"kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.360831 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.386717 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frrz6\" (UniqueName: \"kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6\") pod \"crc-debug-s4z7c\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") " pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
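[Editor's note: the pod_startup_latency_tracker entry above reports two numbers for must-gather-2pps9: podStartE2EDuration, which is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration, which matches the E2E duration minus the image-pull window (lastFinishedPulling minus firstStartedPulling). The sketch below reproduces that arithmetic from the logged timestamps; this is my reading of the tracker's output, not its source code.]

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the pod_startup_latency_tracker entry above
	// (time.Time.String() layout); parse errors ignored for brevity.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-12-09 18:22:38 +0000 UTC")
	firstPull, _ := time.Parse(layout, "2025-12-09 18:22:39.457958942 +0000 UTC")
	lastPull, _ := time.Parse(layout, "2025-12-09 18:22:47.451985736 +0000 UTC")
	running, _ := time.Parse(layout, "2025-12-09 18:22:49.230163614 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration = 11.230163614s
	slo := e2e - lastPull.Sub(firstPull) // ≈ podStartSLOduration = 3.23613683 (tracker uses the monotonic m=+... readings)
	fmt.Println(e2e, slo)
}
```

The crc-debug-s4z7c entry a little further down obeys the same relation: 13.398945911s end-to-end minus a 12.32s pull window gives the reported podStartSLOduration=1.078409406.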
Dec 09 18:22:53 crc kubenswrapper[4840]: I1209 18:22:53.509756 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:22:53 crc kubenswrapper[4840]: W1209 18:22:53.546486 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2200bad5_0252_4d5a_b8a5_efa991ac606f.slice/crio-741950d9fa99ff82183da9ac006ad32951d24e2b3f27b7df18b06bb8b1b7607e WatchSource:0}: Error finding container 741950d9fa99ff82183da9ac006ad32951d24e2b3f27b7df18b06bb8b1b7607e: Status 404 returned error can't find the container with id 741950d9fa99ff82183da9ac006ad32951d24e2b3f27b7df18b06bb8b1b7607e
Dec 09 18:22:54 crc kubenswrapper[4840]: I1209 18:22:54.243782 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/crc-debug-s4z7c" event={"ID":"2200bad5-0252-4d5a-b8a5-efa991ac606f","Type":"ContainerStarted","Data":"741950d9fa99ff82183da9ac006ad32951d24e2b3f27b7df18b06bb8b1b7607e"}
Dec 09 18:22:55 crc kubenswrapper[4840]: E1209 18:22:55.611370 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:22:57 crc kubenswrapper[4840]: I1209 18:22:57.609265 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:22:57 crc kubenswrapper[4840]: E1209 18:22:57.609830 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:22:59 crc kubenswrapper[4840]: E1209 18:22:59.611434 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:23:06 crc kubenswrapper[4840]: I1209 18:23:06.374918 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/crc-debug-s4z7c" event={"ID":"2200bad5-0252-4d5a-b8a5-efa991ac606f","Type":"ContainerStarted","Data":"93848fbad3f2003881a62232e89eca381c59202f0bb4bfdde0c5ded3146bafdb"}
Dec 09 18:23:06 crc kubenswrapper[4840]: I1209 18:23:06.398991 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lv92q/crc-debug-s4z7c" podStartSLOduration=1.078409406 podStartE2EDuration="13.398945911s" podCreationTimestamp="2025-12-09 18:22:53 +0000 UTC" firstStartedPulling="2025-12-09 18:22:53.548368772 +0000 UTC m=+5159.539479405" lastFinishedPulling="2025-12-09 18:23:05.868905277 +0000 UTC m=+5171.860015910" observedRunningTime="2025-12-09 18:23:06.389089714 +0000 UTC m=+5172.380200347" watchObservedRunningTime="2025-12-09 18:23:06.398945911 +0000 UTC m=+5172.390056544"
Dec 09 18:23:09 crc kubenswrapper[4840]: E1209 18:23:09.610902 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:23:11 crc kubenswrapper[4840]: I1209 18:23:11.608411 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:23:11 crc kubenswrapper[4840]: E1209 18:23:11.609131 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:23:14 crc kubenswrapper[4840]: E1209 18:23:14.616302 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:23:22 crc kubenswrapper[4840]: E1209 18:23:22.620536 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:23:23 crc kubenswrapper[4840]: I1209 18:23:23.609580 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:23:23 crc kubenswrapper[4840]: E1209 18:23:23.610608 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:23:28 crc kubenswrapper[4840]: E1209 18:23:28.611271 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:23:28 crc kubenswrapper[4840]: I1209 18:23:28.611948 4840 generic.go:334] "Generic (PLEG): container finished" podID="2200bad5-0252-4d5a-b8a5-efa991ac606f" containerID="93848fbad3f2003881a62232e89eca381c59202f0bb4bfdde0c5ded3146bafdb" exitCode=0
Dec 09 18:23:28 crc kubenswrapper[4840]: I1209 18:23:28.619779 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/crc-debug-s4z7c" event={"ID":"2200bad5-0252-4d5a-b8a5-efa991ac606f","Type":"ContainerDied","Data":"93848fbad3f2003881a62232e89eca381c59202f0bb4bfdde0c5ded3146bafdb"}
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.760265 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-s4z7c"
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.838872 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lv92q/crc-debug-s4z7c"]
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.848002 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lv92q/crc-debug-s4z7c"]
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.872179 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host\") pod \"2200bad5-0252-4d5a-b8a5-efa991ac606f\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") "
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.872365 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frrz6\" (UniqueName: \"kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6\") pod \"2200bad5-0252-4d5a-b8a5-efa991ac606f\" (UID: \"2200bad5-0252-4d5a-b8a5-efa991ac606f\") "
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.874584 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host" (OuterVolumeSpecName: "host") pod "2200bad5-0252-4d5a-b8a5-efa991ac606f" (UID: "2200bad5-0252-4d5a-b8a5-efa991ac606f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.880225 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6" (OuterVolumeSpecName: "kube-api-access-frrz6") pod "2200bad5-0252-4d5a-b8a5-efa991ac606f" (UID: "2200bad5-0252-4d5a-b8a5-efa991ac606f"). InnerVolumeSpecName "kube-api-access-frrz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.880613 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"]
Dec 09 18:23:29 crc kubenswrapper[4840]: E1209 18:23:29.881012 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2200bad5-0252-4d5a-b8a5-efa991ac606f" containerName="container-00"
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.881028 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="2200bad5-0252-4d5a-b8a5-efa991ac606f" containerName="container-00"
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.881264 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="2200bad5-0252-4d5a-b8a5-efa991ac606f" containerName="container-00"
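[Editor's note: the crc-debug-s4z7c teardown above unmounts two volumes: the projected service-account token and a hostPath volume named "host" (plugin kubernetes.io/host-path), which is what gives an oc-debug-style pod access to the node filesystem. A minimal sketch of that volume declaration; the path "/" and mount point "/host" follow the usual `oc debug node` convention and are assumptions, since the log records only the plugin and volume names.]

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// hostPath volume as mounted by the crc-debug pods above; Path is assumed.
	hostVol := corev1.Volume{
		Name: "host",
		VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/"}, // assumption: node root
		},
	}
	mount := corev1.VolumeMount{Name: "host", MountPath: "/host"} // assumption: oc debug convention
	fmt.Println(hostVol.Name, mount.MountPath)
}
```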
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.882683 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.900105 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"]
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.974305 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frrz6\" (UniqueName: \"kubernetes.io/projected/2200bad5-0252-4d5a-b8a5-efa991ac606f-kube-api-access-frrz6\") on node \"crc\" DevicePath \"\""
Dec 09 18:23:29 crc kubenswrapper[4840]: I1209 18:23:29.974337 4840 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2200bad5-0252-4d5a-b8a5-efa991ac606f-host\") on node \"crc\" DevicePath \"\""
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.075953 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.076035 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.076113 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178166 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178290 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178400 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178859 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178879 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.205814 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.273596 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz8bb"
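[Editor's note: redhat-operators-zz8bb mounts two emptyDir volumes (utilities, catalog-content) alongside its projected token; marketplace catalog pods use these as scratch space for extracting tooling and catalog data. A sketch of the emptyDir declarations; the mount paths are not shown in the log and are therefore omitted here.]

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// The two emptyDir scratch volumes mounted above for redhat-operators-zz8bb.
	volumes := []corev1.Volume{
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
	}
	for _, v := range volumes {
		fmt.Println(v.Name)
	}
}
```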
09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.178879 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.205814 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") pod \"redhat-operators-zz8bb\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.273596 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.619929 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2200bad5-0252-4d5a-b8a5-efa991ac606f" path="/var/lib/kubelet/pods/2200bad5-0252-4d5a-b8a5-efa991ac606f/volumes" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.638263 4840 scope.go:117] "RemoveContainer" containerID="93848fbad3f2003881a62232e89eca381c59202f0bb4bfdde0c5ded3146bafdb" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.638475 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-s4z7c" Dec 09 18:23:30 crc kubenswrapper[4840]: I1209 18:23:30.776945 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"] Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.138363 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lv92q/crc-debug-qnk9t"] Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.142402 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.205000 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j89k5\" (UniqueName: \"kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.205063 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.307276 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j89k5\" (UniqueName: \"kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.307334 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.307505 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.339877 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j89k5\" (UniqueName: \"kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5\") pod \"crc-debug-qnk9t\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") " pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.475556 4840 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-qnk9t" Dec 09 18:23:31 crc kubenswrapper[4840]: W1209 18:23:31.505757 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7ec76dc_58f5_460a_8ee7_5d40426cf8f4.slice/crio-286ec8f2181e6bc5a9d48f9c88321b4f6aa351287e10a76e2221b6548aa7fb83 WatchSource:0}: Error finding container 286ec8f2181e6bc5a9d48f9c88321b4f6aa351287e10a76e2221b6548aa7fb83: Status 404 returned error can't find the container with id 286ec8f2181e6bc5a9d48f9c88321b4f6aa351287e10a76e2221b6548aa7fb83 Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.655074 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/crc-debug-qnk9t" event={"ID":"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4","Type":"ContainerStarted","Data":"286ec8f2181e6bc5a9d48f9c88321b4f6aa351287e10a76e2221b6548aa7fb83"} Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.660545 4840 generic.go:334] "Generic (PLEG): container finished" podID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerID="be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85" exitCode=0 Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.660622 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerDied","Data":"be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85"} Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.661234 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerStarted","Data":"0aa1ebf37ae7e4b2d405f45ce050360d9229b7600f6ff414193564a71d875244"} Dec 09 18:23:31 crc kubenswrapper[4840]: I1209 18:23:31.665586 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:23:32 crc kubenswrapper[4840]: I1209 18:23:32.681790 4840 generic.go:334] "Generic (PLEG): container finished" podID="c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" containerID="14b64e6391f5fd5a0d3dadf2f5078a28950621de1ff8d2f43569f669931f41dd" exitCode=1 Dec 09 18:23:32 crc kubenswrapper[4840]: I1209 18:23:32.681924 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/crc-debug-qnk9t" event={"ID":"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4","Type":"ContainerDied","Data":"14b64e6391f5fd5a0d3dadf2f5078a28950621de1ff8d2f43569f669931f41dd"} Dec 09 18:23:32 crc kubenswrapper[4840]: I1209 18:23:32.730388 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lv92q/crc-debug-qnk9t"] Dec 09 18:23:32 crc kubenswrapper[4840]: I1209 18:23:32.744147 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lv92q/crc-debug-qnk9t"] Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.693412 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerStarted","Data":"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0"} Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.824051 4840 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.862157 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j89k5\" (UniqueName: \"kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5\") pod \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") "
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.862385 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host\") pod \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\" (UID: \"c7ec76dc-58f5-460a-8ee7-5d40426cf8f4\") "
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.862528 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host" (OuterVolumeSpecName: "host") pod "c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" (UID: "c7ec76dc-58f5-460a-8ee7-5d40426cf8f4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.863338 4840 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-host\") on node \"crc\" DevicePath \"\""
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.878176 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5" (OuterVolumeSpecName: "kube-api-access-j89k5") pod "c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" (UID: "c7ec76dc-58f5-460a-8ee7-5d40426cf8f4"). InnerVolumeSpecName "kube-api-access-j89k5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:23:33 crc kubenswrapper[4840]: I1209 18:23:33.965663 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j89k5\" (UniqueName: \"kubernetes.io/projected/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4-kube-api-access-j89k5\") on node \"crc\" DevicePath \"\""
Dec 09 18:23:34 crc kubenswrapper[4840]: I1209 18:23:34.621848 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" path="/var/lib/kubelet/pods/c7ec76dc-58f5-460a-8ee7-5d40426cf8f4/volumes"
Dec 09 18:23:34 crc kubenswrapper[4840]: I1209 18:23:34.702553 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/crc-debug-qnk9t"
Dec 09 18:23:34 crc kubenswrapper[4840]: I1209 18:23:34.702573 4840 scope.go:117] "RemoveContainer" containerID="14b64e6391f5fd5a0d3dadf2f5078a28950621de1ff8d2f43569f669931f41dd"
Dec 09 18:23:36 crc kubenswrapper[4840]: I1209 18:23:36.725988 4840 generic.go:334] "Generic (PLEG): container finished" podID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerID="f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0" exitCode=0
Dec 09 18:23:36 crc kubenswrapper[4840]: I1209 18:23:36.726081 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerDied","Data":"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0"}
Dec 09 18:23:36 crc kubenswrapper[4840]: E1209 18:23:36.733320 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 18:23:36 crc kubenswrapper[4840]: E1209 18:23:36.733562 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested"
Dec 09 18:23:36 crc kubenswrapper[4840]: E1209 18:23:36.733700 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 18:23:36 crc kubenswrapper[4840]: E1209 18:23:36.735178 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:23:37 crc kubenswrapper[4840]: I1209 18:23:37.739360 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerStarted","Data":"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81"}
Dec 09 18:23:37 crc kubenswrapper[4840]: I1209 18:23:37.758180 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zz8bb" podStartSLOduration=3.190221423 podStartE2EDuration="8.758161828s" podCreationTimestamp="2025-12-09 18:23:29 +0000 UTC" firstStartedPulling="2025-12-09 18:23:31.665283431 +0000 UTC m=+5197.656394064" lastFinishedPulling="2025-12-09 18:23:37.233223836 +0000 UTC m=+5203.224334469" observedRunningTime="2025-12-09 18:23:37.755797341 +0000 UTC m=+5203.746907994" watchObservedRunningTime="2025-12-09 18:23:37.758161828 +0000 UTC m=+5203.749272461"
Dec 09 18:23:38 crc kubenswrapper[4840]: I1209 18:23:38.608942 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:23:38 crc kubenswrapper[4840]: E1209 18:23:38.609548 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:23:40 crc kubenswrapper[4840]: I1209 18:23:40.274840 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:40 crc kubenswrapper[4840]: I1209 18:23:40.276014 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zz8bb"
Dec 09 18:23:40 crc kubenswrapper[4840]: E1209 18:23:40.612711 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:23:41 crc kubenswrapper[4840]: I1209 18:23:41.325854 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zz8bb" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="registry-server" probeResult="failure" output=<
Dec 09 18:23:41 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s
Dec 09 18:23:41 crc kubenswrapper[4840]: >
Dec 09 18:23:49 crc kubenswrapper[4840]: I1209 18:23:49.614202 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:23:49 crc kubenswrapper[4840]: E1209 18:23:49.618298 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:23:50 crc kubenswrapper[4840]: I1209 18:23:50.331921 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:50 crc kubenswrapper[4840]: I1209 18:23:50.384710 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:50 crc kubenswrapper[4840]: I1209 18:23:50.564192 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"] Dec 09 18:23:51 crc kubenswrapper[4840]: E1209 18:23:51.610992 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:23:51 crc kubenswrapper[4840]: I1209 18:23:51.962661 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zz8bb" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="registry-server" containerID="cri-o://039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81" gracePeriod=2 Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.487082 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.561803 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") pod \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.562240 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities\") pod \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.562421 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") pod \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\" (UID: \"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf\") " Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.562954 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities" (OuterVolumeSpecName: "utilities") pod "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" (UID: "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.563266 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.570220 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm" (OuterVolumeSpecName: "kube-api-access-xn5lm") pod "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" (UID: "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf"). InnerVolumeSpecName "kube-api-access-xn5lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:23:52 crc kubenswrapper[4840]: E1209 18:23:52.610307 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.665811 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn5lm\" (UniqueName: \"kubernetes.io/projected/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-kube-api-access-xn5lm\") on node \"crc\" DevicePath \"\"" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.692425 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" (UID: "55e0c89b-1e53-45e1-8c26-8381ebb8fcbf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.767247 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.976549 4840 generic.go:334] "Generic (PLEG): container finished" podID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerID="039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81" exitCode=0 Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.976591 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerDied","Data":"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81"} Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.976606 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zz8bb" Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.976615 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz8bb" event={"ID":"55e0c89b-1e53-45e1-8c26-8381ebb8fcbf","Type":"ContainerDied","Data":"0aa1ebf37ae7e4b2d405f45ce050360d9229b7600f6ff414193564a71d875244"} Dec 09 18:23:52 crc kubenswrapper[4840]: I1209 18:23:52.976631 4840 scope.go:117] "RemoveContainer" containerID="039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.023921 4840 scope.go:117] "RemoveContainer" containerID="f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.028764 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"] Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.046832 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zz8bb"] Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.088824 4840 scope.go:117] "RemoveContainer" containerID="be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.120138 4840 scope.go:117] "RemoveContainer" containerID="039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81" Dec 09 18:23:53 crc kubenswrapper[4840]: E1209 18:23:53.120511 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81\": container with ID starting with 039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81 not found: ID does not exist" containerID="039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.120541 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81"} err="failed to get container status \"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81\": rpc error: code = NotFound desc = could not find container \"039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81\": container with ID starting with 039df818a6150747ddc738f4931b8adf8e37dfafaa02bfa804189380813e9e81 not found: ID does not exist" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.120562 4840 scope.go:117] "RemoveContainer" containerID="f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0" Dec 09 18:23:53 crc kubenswrapper[4840]: E1209 18:23:53.120898 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0\": container with ID starting with f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0 not found: ID does not exist" containerID="f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.120919 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0"} err="failed to get container status \"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0\": rpc error: code = NotFound desc = could not find container 
\"f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0\": container with ID starting with f9270124d9befed2a63546191089f0759730936b8904ab8dfbd70d84febfbab0 not found: ID does not exist" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.120942 4840 scope.go:117] "RemoveContainer" containerID="be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85" Dec 09 18:23:53 crc kubenswrapper[4840]: E1209 18:23:53.122141 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85\": container with ID starting with be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85 not found: ID does not exist" containerID="be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85" Dec 09 18:23:53 crc kubenswrapper[4840]: I1209 18:23:53.122196 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85"} err="failed to get container status \"be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85\": rpc error: code = NotFound desc = could not find container \"be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85\": container with ID starting with be59fe44dfa4831949b9458e04822f29c5515a1dace78759840a6126f0d47f85 not found: ID does not exist" Dec 09 18:23:54 crc kubenswrapper[4840]: I1209 18:23:54.620549 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" path="/var/lib/kubelet/pods/55e0c89b-1e53-45e1-8c26-8381ebb8fcbf/volumes" Dec 09 18:24:03 crc kubenswrapper[4840]: E1209 18:24:03.610915 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:24:03 crc kubenswrapper[4840]: E1209 18:24:03.611022 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:24:04 crc kubenswrapper[4840]: I1209 18:24:04.623582 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" Dec 09 18:24:04 crc kubenswrapper[4840]: E1209 18:24:04.624282 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:24:16 crc kubenswrapper[4840]: E1209 18:24:16.728730 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in 
Dec 09 18:24:16 crc kubenswrapper[4840]: E1209 18:24:16.729216 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:24:16 crc kubenswrapper[4840]: E1209 18:24:16.729331 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 18:24:16 crc kubenswrapper[4840]: E1209 18:24:16.730545 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:24:17 crc kubenswrapper[4840]: I1209 18:24:17.609135 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210"
Dec 09 18:24:17 crc kubenswrapper[4840]: E1209 18:24:17.609869 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:24:18 crc kubenswrapper[4840]: E1209 18:24:18.614666 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.277447 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_694a7e34-d1ce-4a1b-8475-fbb5d250b955/init-config-reloader/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.476377 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_694a7e34-d1ce-4a1b-8475-fbb5d250b955/alertmanager/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.501731 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_694a7e34-d1ce-4a1b-8475-fbb5d250b955/init-config-reloader/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.510296 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_694a7e34-d1ce-4a1b-8475-fbb5d250b955/config-reloader/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.726726 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c5899b866-c7lp6_275eb97a-385a-428b-8635-e31d1e4def98/barbican-api-log/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.732634 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c5899b866-c7lp6_275eb97a-385a-428b-8635-e31d1e4def98/barbican-api/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.787044 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7d74754bb4-694mt_84560bb3-a93c-4016-a341-e4c3cba8651e/barbican-keystone-listener/0.log"
Dec 09 18:24:21 crc kubenswrapper[4840]: I1209 18:24:21.925295 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7d74754bb4-694mt_84560bb3-a93c-4016-a341-e4c3cba8651e/barbican-keystone-listener-log/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.010057 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-8455558bd7-bssg9_bef7e9a8-ed8b-477e-a9f8-329dda25e45c/barbican-worker-log/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.021170 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-8455558bd7-bssg9_bef7e9a8-ed8b-477e-a9f8-329dda25e45c/barbican-worker/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.213075 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-kpvgn_07769bef-a8d2-452e-af4d-c33e9c99da4b/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.397369 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9ec426c3-8fdd-42d9-9ea5-5d751112ee04/ceilometer-notification-agent/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.444093 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9ec426c3-8fdd-42d9-9ea5-5d751112ee04/proxy-httpd/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.497283 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9ec426c3-8fdd-42d9-9ea5-5d751112ee04/sg-core/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.632934 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b22b06eb-f287-43cf-abc6-9cb5580fa71a/cinder-api-log/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.708344 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b22b06eb-f287-43cf-abc6-9cb5580fa71a/cinder-api/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.748151 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_308bf122-33f6-46ac-bcda-722eacff6427/cinder-scheduler/0.log"
Dec 09 18:24:22 crc kubenswrapper[4840]: I1209 18:24:22.834699 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_308bf122-33f6-46ac-bcda-722eacff6427/probe/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.001904 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-api-0_35c9d492-0c0b-4d85-9235-e7ede2df5752/cloudkitty-api/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.022437 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-api-0_35c9d492-0c0b-4d85-9235-e7ede2df5752/cloudkitty-api-log/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.260193 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-compactor-0_3049524c-ff2b-4c18-baf0-c15c182583cc/loki-compactor/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.708688 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-distributor-664b687b54-mtfrn_be53cf19-ee08-4a03-96d8-5899cd1f59ec/loki-distributor/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.735903 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-gateway-bc75944f-b9cg8_4e1e443c-6d35-4788-8cd7-dae8911ffc1e/gateway/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.921890 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-gateway-bc75944f-fj2hp_64550645-76ad-4518-ad64-74d530e0a4f1/gateway/0.log"
Dec 09 18:24:23 crc kubenswrapper[4840]: I1209 18:24:23.934230 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-index-gateway-0_374f81b4-2b45-4e8a-9b41-898b64e5623f/loki-index-gateway/0.log"
Dec 09 18:24:24 crc kubenswrapper[4840]: I1209 18:24:24.218841 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-ingester-0_b45f4212-4ee0-4679-b115-d8d231bf946d/loki-ingester/0.log"
Dec 09 18:24:24 crc kubenswrapper[4840]: I1209 18:24:24.236593 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-querier-5467947bf7-xzc88_4cf23999-5210-4106-aa05-9ac1c07da2a1/loki-querier/0.log"
Dec 09 18:24:24 crc kubenswrapper[4840]: I1209 18:24:24.548686 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-query-frontend-7c8cd744d9-j86k5_7838bb86-5c5f-4100-aa6c-442e1e591645/loki-query-frontend/0.log"
Dec 09 18:24:24 crc kubenswrapper[4840]: I1209 18:24:24.728756 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-vtm9k_048f4974-cd24-4291-a209-a357603a64e8/init/0.log"
Dec 09 18:24:24 crc kubenswrapper[4840]: I1209 18:24:24.938191 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-vtm9k_048f4974-cd24-4291-a209-a357603a64e8/init/0.log"
Dec 09 18:24:25 crc kubenswrapper[4840]: I1209 18:24:25.001042 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-vtm9k_048f4974-cd24-4291-a209-a357603a64e8/dnsmasq-dns/0.log"
Dec 09 18:24:25 crc kubenswrapper[4840]: I1209 18:24:25.583954 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-2hnvl_7fb2365c-d487-44bb-8096-85400eb2f6ee/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:25 crc kubenswrapper[4840]: I1209 18:24:25.602663 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-77shc_38054200-bff9-439b-a60f-ff6f3b8926f0/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:25 crc kubenswrapper[4840]: I1209 18:24:25.822254 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-8cnsr_20290271-4f20-4407-b1a4-063880514c1e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:25 crc kubenswrapper[4840]: I1209 18:24:25.919375 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-c66mh_b3e6a41a-c85e-42c3-b473-c00456c83bf5/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.083154 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-lm7tr_18b120a1-f1fb-4739-8c18-2a4380eb70e0/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.389485 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7_b6a0155a-f3d1-4a80-ad83-1fcce8d5de36/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-m9tw7_b6a0155a-f3d1-4a80-ad83-1fcce8d5de36/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.443051 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-nkddw_feab3b6f-2a30-4db6-af22-22ecea863f88/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.608355 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_6f9babec-3c0f-47f7-bb1a-e898e153374e/glance-httpd/0.log" Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.757740 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_6f9babec-3c0f-47f7-bb1a-e898e153374e/glance-log/0.log" Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.904524 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_0ae83718-1999-4231-89c7-aac5ea4f930a/glance-httpd/0.log" Dec 09 18:24:26 crc kubenswrapper[4840]: I1209 18:24:26.941366 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_0ae83718-1999-4231-89c7-aac5ea4f930a/glance-log/0.log" Dec 09 18:24:27 crc kubenswrapper[4840]: I1209 18:24:27.112513 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-86649f76d6-p6jhc_8dc145cc-e506-4686-8e22-c881d8fc079f/keystone-api/0.log" Dec 09 18:24:27 crc kubenswrapper[4840]: I1209 18:24:27.759807 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_4ece64f5-c3f1-4872-ad0e-5b64aa06e5e9/kube-state-metrics/0.log" Dec 09 18:24:27 crc kubenswrapper[4840]: I1209 18:24:27.768490 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29421721-t7pw8_fd6ba15a-4332-4bf1-87f7-c0ab97b100e6/keystone-cron/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.057710 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-768d86bb9c-skrvq_4ebfb39f-3df3-4538-b69e-5366dc52b442/neutron-api/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.191874 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-proc-0_3cbe862b-8e49-4124-88e1-1f32cd429250/cloudkitty-proc/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.213704 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-768d86bb9c-skrvq_4ebfb39f-3df3-4538-b69e-5366dc52b442/neutron-httpd/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.592600 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_c12bf515-0e18-4026-ba24-0c88f099847e/nova-api-log/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.696916 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_1a1d1a1a-f16c-4351-b65b-00322ce7929d/nova-cell0-conductor-conductor/0.log" Dec 09 18:24:28 crc kubenswrapper[4840]: I1209 18:24:28.745334 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_c12bf515-0e18-4026-ba24-0c88f099847e/nova-api-api/0.log" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.136980 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-conductor-0_5bd10e5c-6713-40dc-b744-349603d760f6/nova-cell1-conductor-conductor/0.log" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.173838 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ae9c08f5-b74f-4a78-9f61-425b25ef2f35/nova-cell1-novncproxy-novncproxy/0.log" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.524000 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_543610b3-fd02-47ce-9bce-a112763de7bd/nova-metadata-log/0.log" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.572303 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_e8d64fd3-fec5-4d6c-b007-d481268bfe1b/nova-scheduler-scheduler/0.log" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.608276 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" Dec 09 18:24:29 crc kubenswrapper[4840]: E1209 18:24:29.608620 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:24:29 crc kubenswrapper[4840]: I1209 18:24:29.727202 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e56689ef-4c1c-4775-9740-3e1ec3a0f4e8/mysql-bootstrap/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.012151 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e56689ef-4c1c-4775-9740-3e1ec3a0f4e8/mysql-bootstrap/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.057548 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e56689ef-4c1c-4775-9740-3e1ec3a0f4e8/galera/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.248294 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_71339989-bd1f-4da4-8976-62fbb767a30e/mysql-bootstrap/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.513455 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_71339989-bd1f-4da4-8976-62fbb767a30e/mysql-bootstrap/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.523407 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_71339989-bd1f-4da4-8976-62fbb767a30e/galera/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.722258 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_2bd34f5c-7383-4aa1-868d-f7f462d7a708/openstackclient/0.log" Dec 09 18:24:30 crc kubenswrapper[4840]: I1209 18:24:30.822609 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-66bsn_cb8eb8de-7e32-4535-9015-394a0621e5a7/openstack-network-exporter/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.050903 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6vxgb_d3189254-8cff-481a-92f3-466a928de54e/ovsdb-server-init/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.235000 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-6vxgb_d3189254-8cff-481a-92f3-466a928de54e/ovsdb-server-init/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.287031 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6vxgb_d3189254-8cff-481a-92f3-466a928de54e/ovs-vswitchd/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.287917 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6vxgb_d3189254-8cff-481a-92f3-466a928de54e/ovsdb-server/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.302580 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_543610b3-fd02-47ce-9bce-a112763de7bd/nova-metadata-metadata/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.516500 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-v78xq_637ab881-6952-409f-8e9d-619aaf72fb51/ovn-controller/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.524929 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8730f858-5803-4a82-bf34-63a2b65ddebb/openstack-network-exporter/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: E1209 18:24:31.619253 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.916249 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8730f858-5803-4a82-bf34-63a2b65ddebb/ovn-northd/0.log" Dec 09 18:24:31 crc kubenswrapper[4840]: I1209 18:24:31.920016 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7b1009e9-391e-4a13-8d90-f55fb6c3b329/openstack-network-exporter/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.009305 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7b1009e9-391e-4a13-8d90-f55fb6c3b329/ovsdbserver-nb/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.158123 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a4b9253d-0e13-4dd3-8b9a-7428281a743d/openstack-network-exporter/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.240991 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a4b9253d-0e13-4dd3-8b9a-7428281a743d/ovsdbserver-sb/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.455988 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5df4f579dd-7gd8n_ccffcd1a-4659-4005-abd2-ae99de7f74d1/placement-log/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.457155 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5df4f579dd-7gd8n_ccffcd1a-4659-4005-abd2-ae99de7f74d1/placement-api/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.521548 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4/init-config-reloader/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.713596 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_prometheus-metric-storage-0_edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4/init-config-reloader/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.745247 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4/config-reloader/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.764727 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4/prometheus/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.844491 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_edd5bd8a-5838-4ddc-b0a6-17d86a90e8e4/thanos-sidecar/0.log" Dec 09 18:24:32 crc kubenswrapper[4840]: I1209 18:24:32.950460 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_745eab59-21fe-492c-8a51-5f557f1802e3/setup-container/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.245375 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0f054c36-b41d-4ef8-8d86-1a9ef134dba0/setup-container/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.256544 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_745eab59-21fe-492c-8a51-5f557f1802e3/setup-container/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.296800 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_745eab59-21fe-492c-8a51-5f557f1802e3/rabbitmq/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.490508 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0f054c36-b41d-4ef8-8d86-1a9ef134dba0/rabbitmq/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.517442 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0f054c36-b41d-4ef8-8d86-1a9ef134dba0/setup-container/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.565783 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-4fr4v_c9a73b5a-5966-4c16-9d5c-7ad9765e4b91/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: E1209 18:24:33.614272 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.762522 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-fwj5g_afb99ad0-9b95-4cad-a689-01347c7013c1/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 18:24:33 crc kubenswrapper[4840]: I1209 18:24:33.987846 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7d5bcffc7c-n8wfr_e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7/proxy-server/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.018587 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7d5bcffc7c-n8wfr_e6dbbeb6-1d77-472c-928f-c3abcaa2a8e7/proxy-httpd/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 
18:24:34.033482 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-2xsrv_0e3eae38-0d51-4c6c-9258-41a7699cb1f1/swift-ring-rebalance/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.282257 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/account-reaper/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.309663 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/account-auditor/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.376550 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/account-replicator/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.489178 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/account-server/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.525212 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/container-replicator/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.577664 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/container-auditor/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.623850 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/container-server/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.737164 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/container-updater/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.780808 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/object-auditor/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.847785 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/object-expirer/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.931013 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/object-replicator/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.955275 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/object-server/0.log" Dec 09 18:24:34 crc kubenswrapper[4840]: I1209 18:24:34.983560 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/object-updater/0.log" Dec 09 18:24:35 crc kubenswrapper[4840]: I1209 18:24:35.116313 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/rsync/0.log" Dec 09 18:24:35 crc kubenswrapper[4840]: I1209 18:24:35.306305 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_fa3b5d5b-eafc-4e13-93ab-6eaf7a40e534/swift-recon-cron/0.log" Dec 09 18:24:41 crc kubenswrapper[4840]: I1209 18:24:41.675331 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_memcached-0_6cf8f20c-e36a-4ed4-b627-3b88423123c9/memcached/0.log" Dec 09 18:24:43 crc kubenswrapper[4840]: I1209 18:24:43.609021 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" Dec 09 18:24:44 crc kubenswrapper[4840]: I1209 18:24:44.491227 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478"} Dec 09 18:24:44 crc kubenswrapper[4840]: E1209 18:24:44.616100 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:24:44 crc kubenswrapper[4840]: E1209 18:24:44.625393 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:24:57 crc kubenswrapper[4840]: E1209 18:24:57.610598 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:24:57 crc kubenswrapper[4840]: E1209 18:24:57.610823 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:25:05 crc kubenswrapper[4840]: I1209 18:25:05.823443 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/util/0.log" Dec 09 18:25:05 crc kubenswrapper[4840]: I1209 18:25:05.949742 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/pull/0.log" Dec 09 18:25:05 crc kubenswrapper[4840]: I1209 18:25:05.988412 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/util/0.log" Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.016039 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/pull/0.log" Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.111299 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/util/0.log" 
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.175865 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/extract/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.196695 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_84f5b8ce9b737d136e9d44026726099c3c124bf2a1b3be498eb888ce47rjr5d_8ce90f28-39dd-430b-8662-cf8d01eb6af1/pull/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.292314 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-g495k_3bf967ce-abdb-4d63-a262-861d238218e9/kube-rbac-proxy/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.411889 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-g495k_3bf967ce-abdb-4d63-a262-861d238218e9/manager/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.441103 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-v88tx_4637e86f-9342-431b-8bea-80027b740c6a/kube-rbac-proxy/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.559398 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-v88tx_4637e86f-9342-431b-8bea-80027b740c6a/manager/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.642066 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4w9cf_63ca78d6-7a48-4fcf-bac3-7215c2ca3282/manager/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.643132 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4w9cf_63ca78d6-7a48-4fcf-bac3-7215c2ca3282/kube-rbac-proxy/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.790087 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-dr2p5_09bdc1d3-b19f-4f25-b28a-e4e100108d48/kube-rbac-proxy/0.log"
Dec 09 18:25:06 crc kubenswrapper[4840]: I1209 18:25:06.908252 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-dr2p5_09bdc1d3-b19f-4f25-b28a-e4e100108d48/manager/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.019578 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpd2s_1e390e20-35af-4e6b-87cf-7cdd9fa55898/manager/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.078868 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-h9lh4_a9b07484-f3d4-441d-8390-03d86f2ffe1f/kube-rbac-proxy/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.099889 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpd2s_1e390e20-35af-4e6b-87cf-7cdd9fa55898/kube-rbac-proxy/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.201515 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-h9lh4_a9b07484-f3d4-441d-8390-03d86f2ffe1f/manager/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.294943 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-zgj4l_cf5c5d51-0dfb-414d-9f08-3c9be6400df5/kube-rbac-proxy/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.524829 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-9mmdv_ce821a6e-c155-4d30-aa89-f56d2348821d/manager/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.527216 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-9mmdv_ce821a6e-c155-4d30-aa89-f56d2348821d/kube-rbac-proxy/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.618095 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-zgj4l_cf5c5d51-0dfb-414d-9f08-3c9be6400df5/manager/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.885168 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-2sjnt_6a6eb330-3cbb-44cd-aced-d66e6f3554e6/kube-rbac-proxy/0.log"
Dec 09 18:25:07 crc kubenswrapper[4840]: I1209 18:25:07.945596 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-2sjnt_6a6eb330-3cbb-44cd-aced-d66e6f3554e6/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.077426 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-wb8sz_1a555877-a028-46dd-bcf4-0202493c00b2/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.089601 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-wb8sz_1a555877-a028-46dd-bcf4-0202493c00b2/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.190250 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-cc2w8_8e1c1649-f8e1-4044-8c36-f4cfb12a929b/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.280828 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-cc2w8_8e1c1649-f8e1-4044-8c36-f4cfb12a929b/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.377743 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-mpgv7_2ec42045-f3cc-4418-8744-d6397ec73843/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.418608 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-mpgv7_2ec42045-f3cc-4418-8744-d6397ec73843/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.491300 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-r5zgb_a40ea926-2932-47de-89e0-1b7db3b1c6e9/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.624072 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-r5zgb_a40ea926-2932-47de-89e0-1b7db3b1c6e9/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.652838 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vdldq_cdf3df3c-7d67-4096-95e2-779d5e413c46/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.696766 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vdldq_cdf3df3c-7d67-4096-95e2-779d5e413c46/manager/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.814934 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fmcphk_1f1441e7-48a5-433b-a3b7-882a3582ac88/kube-rbac-proxy/0.log"
Dec 09 18:25:08 crc kubenswrapper[4840]: I1209 18:25:08.863709 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fmcphk_1f1441e7-48a5-433b-a3b7-882a3582ac88/manager/0.log"
Dec 09 18:25:09 crc kubenswrapper[4840]: I1209 18:25:09.834263 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6ft79_146acea0-42bf-4e51-b660-5577f8c2ea66/registry-server/0.log"
Dec 09 18:25:09 crc kubenswrapper[4840]: I1209 18:25:09.910570 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-67x7g_a70c50dc-fee0-4c02-8ae7-6e41429292ef/kube-rbac-proxy/0.log"
Dec 09 18:25:09 crc kubenswrapper[4840]: I1209 18:25:09.930331 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5f5557f974-mq8t5_b2677e8e-1651-477c-b3ae-b08cc1ab0e6c/operator/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.126691 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-67x7g_a70c50dc-fee0-4c02-8ae7-6e41429292ef/manager/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.187345 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-q5v56_298ab5a4-fb7d-42e4-8278-3972993456aa/kube-rbac-proxy/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.249440 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-q5v56_298ab5a4-fb7d-42e4-8278-3972993456aa/manager/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.398273 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6xndz_6a1d9f43-3cd7-4480-a4f2-e88b82d972ab/operator/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.514199 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-x2hg6_fc539d46-da16-4f0b-8303-81fc7c35303b/kube-rbac-proxy/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: E1209 18:25:10.609663 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.635827 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-866b78c4d6-gnpwh_a0c4ab40-b641-4154-a607-dfe342057b15/manager/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.660813 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-x2hg6_fc539d46-da16-4f0b-8303-81fc7c35303b/manager/0.log"
Dec 09 18:25:10 crc kubenswrapper[4840]: I1209 18:25:10.691916 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-796785f986-7mv2k_57253bfe-39c8-4ad9-99b4-b475a492083e/kube-rbac-proxy/0.log"
Dec 09 18:25:11 crc kubenswrapper[4840]: I1209 18:25:11.311517 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-796785f986-7mv2k_57253bfe-39c8-4ad9-99b4-b475a492083e/manager/0.log"
Dec 09 18:25:11 crc kubenswrapper[4840]: I1209 18:25:11.568875 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-xc9jz_fb5216a9-c43c-4eb4-ba33-affa2a72dbc4/manager/0.log"
Dec 09 18:25:11 crc kubenswrapper[4840]: I1209 18:25:11.573025 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-xc9jz_fb5216a9-c43c-4eb4-ba33-affa2a72dbc4/kube-rbac-proxy/0.log"
Dec 09 18:25:11 crc kubenswrapper[4840]: I1209 18:25:11.599641 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-4kqr2_adbbc8e9-2553-4096-89a3-133ba5a752b6/manager/0.log"
Dec 09 18:25:11 crc kubenswrapper[4840]: E1209 18:25:11.611189 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:25:11 crc kubenswrapper[4840]: I1209 18:25:11.637470 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-4kqr2_adbbc8e9-2553-4096-89a3-133ba5a752b6/kube-rbac-proxy/0.log"
Dec 09 18:25:21 crc kubenswrapper[4840]: E1209 18:25:21.610928 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:25:22 crc kubenswrapper[4840]: E1209 18:25:22.613340 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.805071 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:26 crc kubenswrapper[4840]: E1209 18:25:26.806021 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="extract-utilities"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806033 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="extract-utilities"
Dec 09 18:25:26 crc kubenswrapper[4840]: E1209 18:25:26.806048 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="extract-content"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806054 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="extract-content"
Dec 09 18:25:26 crc kubenswrapper[4840]: E1209 18:25:26.806074 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" containerName="container-00"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806083 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" containerName="container-00"
Dec 09 18:25:26 crc kubenswrapper[4840]: E1209 18:25:26.806107 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="registry-server"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806113 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="registry-server"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806321 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="55e0c89b-1e53-45e1-8c26-8381ebb8fcbf" containerName="registry-server"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.806333 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7ec76dc-58f5-460a-8ee7-5d40426cf8f4" containerName="container-00"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.808261 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.822685 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.822784 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlbrz\" (UniqueName: \"kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.822897 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.823855 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.924465 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.924534 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlbrz\" (UniqueName: \"kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.924594 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.924929 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.925052 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:26 crc kubenswrapper[4840]: I1209 18:25:26.949128 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlbrz\" (UniqueName: \"kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz\") pod \"certified-operators-l6rh5\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") " pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:27 crc kubenswrapper[4840]: I1209 18:25:27.128226 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:27 crc kubenswrapper[4840]: I1209 18:25:27.727536 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:27 crc kubenswrapper[4840]: I1209 18:25:27.905002 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerStarted","Data":"08208ec7cec50db0a94865412962a7578b3bc05d8c0a5ce4820b19502636d39e"}
Dec 09 18:25:28 crc kubenswrapper[4840]: E1209 18:25:28.169446 4840 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14800525_73dc_4e01_9136_e86e198e0566.slice/crio-715b51e4d3858afc8ed84d3f0be72dd7b91afc9266270ac60aefe81b468971ff.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14800525_73dc_4e01_9136_e86e198e0566.slice/crio-conmon-715b51e4d3858afc8ed84d3f0be72dd7b91afc9266270ac60aefe81b468971ff.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 18:25:28 crc kubenswrapper[4840]: I1209 18:25:28.918218 4840 generic.go:334] "Generic (PLEG): container finished" podID="14800525-73dc-4e01-9136-e86e198e0566" containerID="715b51e4d3858afc8ed84d3f0be72dd7b91afc9266270ac60aefe81b468971ff" exitCode=0
Dec 09 18:25:28 crc kubenswrapper[4840]: I1209 18:25:28.918329 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerDied","Data":"715b51e4d3858afc8ed84d3f0be72dd7b91afc9266270ac60aefe81b468971ff"}
Dec 09 18:25:29 crc kubenswrapper[4840]: I1209 18:25:29.928008 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerStarted","Data":"d46bba17ef57333f0695434301091e92c4689cf8d3a448eefbdf16e4f5b27089"}
Dec 09 18:25:30 crc kubenswrapper[4840]: I1209 18:25:30.942744 4840 generic.go:334] "Generic (PLEG): container finished" podID="14800525-73dc-4e01-9136-e86e198e0566" containerID="d46bba17ef57333f0695434301091e92c4689cf8d3a448eefbdf16e4f5b27089" exitCode=0
Dec 09 18:25:30 crc kubenswrapper[4840]: I1209 18:25:30.942827 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerDied","Data":"d46bba17ef57333f0695434301091e92c4689cf8d3a448eefbdf16e4f5b27089"}
Dec 09 18:25:31 crc kubenswrapper[4840]: I1209 18:25:31.954249 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerStarted","Data":"ffed1b9e481fba105a292875280870f0830a4b053cd0335ad152a699ce86e3e3"}
Dec 09 18:25:31 crc kubenswrapper[4840]: I1209 18:25:31.989575 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l6rh5" podStartSLOduration=3.494779183 podStartE2EDuration="5.989545919s" podCreationTimestamp="2025-12-09 18:25:26 +0000 UTC" firstStartedPulling="2025-12-09 18:25:28.922681446 +0000 UTC m=+5314.913792079" lastFinishedPulling="2025-12-09 18:25:31.417448172 +0000 UTC m=+5317.408558815" observedRunningTime="2025-12-09 18:25:31.976290776 +0000 UTC m=+5317.967401439" watchObservedRunningTime="2025-12-09 18:25:31.989545919 +0000 UTC m=+5317.980656582"
Dec 09 18:25:33 crc kubenswrapper[4840]: I1209 18:25:33.622768 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-mtqnx_0978ab4b-fdc1-46ac-94e2-ead3135e1ceb/control-plane-machine-set-operator/0.log"
Dec 09 18:25:33 crc kubenswrapper[4840]: I1209 18:25:33.819457 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-g5dvm_49c57793-db28-4be3-81ee-01570255716c/kube-rbac-proxy/0.log"
Dec 09 18:25:33 crc kubenswrapper[4840]: I1209 18:25:33.910235 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-g5dvm_49c57793-db28-4be3-81ee-01570255716c/machine-api-operator/0.log"
Dec 09 18:25:35 crc kubenswrapper[4840]: E1209 18:25:35.612192 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:25:36 crc kubenswrapper[4840]: E1209 18:25:36.610027 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:25:37 crc kubenswrapper[4840]: I1209 18:25:37.129221 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:37 crc kubenswrapper[4840]: I1209 18:25:37.129285 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:37 crc kubenswrapper[4840]: I1209 18:25:37.175180 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:38 crc kubenswrapper[4840]: I1209 18:25:38.066491 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:38 crc kubenswrapper[4840]: I1209 18:25:38.118745 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:40 crc kubenswrapper[4840]: I1209 18:25:40.039131 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l6rh5" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="registry-server" containerID="cri-o://ffed1b9e481fba105a292875280870f0830a4b053cd0335ad152a699ce86e3e3" gracePeriod=2
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.051460 4840 generic.go:334] "Generic (PLEG): container finished" podID="14800525-73dc-4e01-9136-e86e198e0566" containerID="ffed1b9e481fba105a292875280870f0830a4b053cd0335ad152a699ce86e3e3" exitCode=0
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.051604 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerDied","Data":"ffed1b9e481fba105a292875280870f0830a4b053cd0335ad152a699ce86e3e3"}
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.051832 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6rh5" event={"ID":"14800525-73dc-4e01-9136-e86e198e0566","Type":"ContainerDied","Data":"08208ec7cec50db0a94865412962a7578b3bc05d8c0a5ce4820b19502636d39e"}
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.051855 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08208ec7cec50db0a94865412962a7578b3bc05d8c0a5ce4820b19502636d39e"
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.106745 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.233445 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities\") pod \"14800525-73dc-4e01-9136-e86e198e0566\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") "
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.233485 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlbrz\" (UniqueName: \"kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz\") pod \"14800525-73dc-4e01-9136-e86e198e0566\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") "
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.233536 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content\") pod \"14800525-73dc-4e01-9136-e86e198e0566\" (UID: \"14800525-73dc-4e01-9136-e86e198e0566\") "
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.234643 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities" (OuterVolumeSpecName: "utilities") pod "14800525-73dc-4e01-9136-e86e198e0566" (UID: "14800525-73dc-4e01-9136-e86e198e0566"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.239050 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz" (OuterVolumeSpecName: "kube-api-access-rlbrz") pod "14800525-73dc-4e01-9136-e86e198e0566" (UID: "14800525-73dc-4e01-9136-e86e198e0566"). InnerVolumeSpecName "kube-api-access-rlbrz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.279820 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14800525-73dc-4e01-9136-e86e198e0566" (UID: "14800525-73dc-4e01-9136-e86e198e0566"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.337430 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.337497 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlbrz\" (UniqueName: \"kubernetes.io/projected/14800525-73dc-4e01-9136-e86e198e0566-kube-api-access-rlbrz\") on node \"crc\" DevicePath \"\""
Dec 09 18:25:41 crc kubenswrapper[4840]: I1209 18:25:41.337527 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14800525-73dc-4e01-9136-e86e198e0566-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 18:25:42 crc kubenswrapper[4840]: I1209 18:25:42.059670 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l6rh5"
Dec 09 18:25:42 crc kubenswrapper[4840]: I1209 18:25:42.095878 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:42 crc kubenswrapper[4840]: I1209 18:25:42.108478 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l6rh5"]
Dec 09 18:25:42 crc kubenswrapper[4840]: I1209 18:25:42.620215 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14800525-73dc-4e01-9136-e86e198e0566" path="/var/lib/kubelet/pods/14800525-73dc-4e01-9136-e86e198e0566/volumes"
Dec 09 18:25:48 crc kubenswrapper[4840]: I1209 18:25:48.128237 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-6wmh8_a56be419-62f5-4c5e-87dc-11c097f51918/cert-manager-controller/0.log"
Dec 09 18:25:48 crc kubenswrapper[4840]: I1209 18:25:48.298857 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-rvq4r_96234cfc-2b1f-4a48-8631-c22932a7129e/cert-manager-cainjector/0.log"
Dec 09 18:25:48 crc kubenswrapper[4840]: I1209 18:25:48.355437 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-n9tqx_f7a8b3dc-71ea-45c4-9699-4c7194a5d90f/cert-manager-webhook/0.log"
Dec 09 18:25:50 crc kubenswrapper[4840]: E1209 18:25:50.611296 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:25:51 crc kubenswrapper[4840]: E1209 18:25:51.617046 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.664413 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:25:53 crc kubenswrapper[4840]: E1209 18:25:53.665343 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="extract-utilities"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.665358 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="extract-utilities"
Dec 09 18:25:53 crc kubenswrapper[4840]: E1209 18:25:53.665379 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="registry-server"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.665385 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="registry-server"
Dec 09 18:25:53 crc kubenswrapper[4840]: E1209 18:25:53.665405 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="extract-content"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.665411 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="extract-content"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.665610 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="14800525-73dc-4e01-9136-e86e198e0566" containerName="registry-server"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.667193 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.676112 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.824001 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.824076 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5lx9\" (UniqueName: \"kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.824896 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.926573 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.926679 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.926737 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5lx9\" (UniqueName: \"kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.927110 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.927151 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.947019 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5lx9\" (UniqueName: \"kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9\") pod \"redhat-marketplace-cnm8c\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") " pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:53 crc kubenswrapper[4840]: I1209 18:25:53.996114 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:25:54 crc kubenswrapper[4840]: W1209 18:25:54.510485 4840 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1f70057_6e36_41eb_8a78_b4badbea394e.slice/crio-00c9bf72b6440263776b87b155eb8b55fe58a0601d83ec446e0015922c614a30 WatchSource:0}: Error finding container 00c9bf72b6440263776b87b155eb8b55fe58a0601d83ec446e0015922c614a30: Status 404 returned error can't find the container with id 00c9bf72b6440263776b87b155eb8b55fe58a0601d83ec446e0015922c614a30
Dec 09 18:25:54 crc kubenswrapper[4840]: I1209 18:25:54.517764 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:25:55 crc kubenswrapper[4840]: I1209 18:25:55.192516 4840 generic.go:334] "Generic (PLEG): container finished" podID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerID="518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e" exitCode=0
Dec 09 18:25:55 crc kubenswrapper[4840]: I1209 18:25:55.192610 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerDied","Data":"518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e"}
Dec 09 18:25:55 crc kubenswrapper[4840]: I1209 18:25:55.192812 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerStarted","Data":"00c9bf72b6440263776b87b155eb8b55fe58a0601d83ec446e0015922c614a30"}
Dec 09 18:25:56 crc kubenswrapper[4840]: I1209 18:25:56.206228 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerStarted","Data":"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"}
Dec 09 18:25:57 crc kubenswrapper[4840]: I1209 18:25:57.218408 4840 generic.go:334] "Generic (PLEG): container finished" podID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerID="f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf" exitCode=0
Dec 09 18:25:57 crc kubenswrapper[4840]: I1209 18:25:57.218832 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerDied","Data":"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"}
Dec 09 18:25:58 crc kubenswrapper[4840]: I1209 18:25:58.232468 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerStarted","Data":"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"}
Dec 09 18:25:58 crc kubenswrapper[4840]: I1209 18:25:58.259279 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cnm8c" podStartSLOduration=2.754447124 podStartE2EDuration="5.259258734s" podCreationTimestamp="2025-12-09 18:25:53 +0000 UTC" firstStartedPulling="2025-12-09 18:25:55.19594935 +0000 UTC m=+5341.187060033" lastFinishedPulling="2025-12-09 18:25:57.70076101 +0000 UTC m=+5343.691871643" observedRunningTime="2025-12-09 18:25:58.250188779 +0000 UTC m=+5344.241299412" watchObservedRunningTime="2025-12-09 18:25:58.259258734 +0000 UTC m=+5344.250369367"
Dec 09 18:26:02 crc kubenswrapper[4840]: I1209 18:26:02.958134 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-wblsr_8a0577b2-63ef-4d11-8f94-e847b9c5a520/nmstate-console-plugin/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.148902 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-7s69c_e80364ae-306f-494d-aaa8-da74475771d0/nmstate-handler/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.206389 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-rtwkf_ca745f75-67ee-4db8-b8e9-49a7ab4cf95e/nmstate-metrics/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.504589 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-wbtkf_0c8f9850-74a1-43bb-b4f1-07e8e7ea0c7b/nmstate-webhook/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.507596 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-rtwkf_ca745f75-67ee-4db8-b8e9-49a7ab4cf95e/kube-rbac-proxy/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.509751 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-k2xmp_86e73f61-98bb-4332-9494-13c663fd8de7/nmstate-operator/0.log"
Dec 09 18:26:03 crc kubenswrapper[4840]: E1209 18:26:03.610402 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.996273 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:03 crc kubenswrapper[4840]: I1209 18:26:03.996327 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:04 crc kubenswrapper[4840]: I1209 18:26:04.070468 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:04 crc kubenswrapper[4840]: I1209 18:26:04.364917 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:04 crc kubenswrapper[4840]: E1209 18:26:04.615529 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:26:05 crc kubenswrapper[4840]: I1209 18:26:05.243753 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:26:06 crc kubenswrapper[4840]: I1209 18:26:06.336429 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cnm8c" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="registry-server" containerID="cri-o://6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00" gracePeriod=2
Dec 09 18:26:06 crc kubenswrapper[4840]: I1209 18:26:06.922356 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.117762 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content\") pod \"d1f70057-6e36-41eb-8a78-b4badbea394e\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") "
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.118165 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities\") pod \"d1f70057-6e36-41eb-8a78-b4badbea394e\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") "
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.118269 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5lx9\" (UniqueName: \"kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9\") pod \"d1f70057-6e36-41eb-8a78-b4badbea394e\" (UID: \"d1f70057-6e36-41eb-8a78-b4badbea394e\") "
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.118793 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities" (OuterVolumeSpecName: "utilities") pod "d1f70057-6e36-41eb-8a78-b4badbea394e" (UID: "d1f70057-6e36-41eb-8a78-b4badbea394e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.126487 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9" (OuterVolumeSpecName: "kube-api-access-m5lx9") pod "d1f70057-6e36-41eb-8a78-b4badbea394e" (UID: "d1f70057-6e36-41eb-8a78-b4badbea394e"). InnerVolumeSpecName "kube-api-access-m5lx9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.146478 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1f70057-6e36-41eb-8a78-b4badbea394e" (UID: "d1f70057-6e36-41eb-8a78-b4badbea394e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.220750 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5lx9\" (UniqueName: \"kubernetes.io/projected/d1f70057-6e36-41eb-8a78-b4badbea394e-kube-api-access-m5lx9\") on node \"crc\" DevicePath \"\""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.220939 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.221015 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f70057-6e36-41eb-8a78-b4badbea394e-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.353570 4840 generic.go:334] "Generic (PLEG): container finished" podID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerID="6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00" exitCode=0
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.353636 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerDied","Data":"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"}
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.353658 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnm8c"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.353697 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnm8c" event={"ID":"d1f70057-6e36-41eb-8a78-b4badbea394e","Type":"ContainerDied","Data":"00c9bf72b6440263776b87b155eb8b55fe58a0601d83ec446e0015922c614a30"}
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.353728 4840 scope.go:117] "RemoveContainer" containerID="6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.377241 4840 scope.go:117] "RemoveContainer" containerID="f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.392039 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.401393 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnm8c"]
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.414695 4840 scope.go:117] "RemoveContainer" containerID="518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.456734 4840 scope.go:117] "RemoveContainer" containerID="6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"
Dec 09 18:26:07 crc kubenswrapper[4840]: E1209 18:26:07.457360 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00\": container with ID starting with 6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00 not found: ID does not exist" containerID="6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.457394 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00"} err="failed to get container status \"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00\": rpc error: code = NotFound desc = could not find container \"6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00\": container with ID starting with 6138034dacb229be4283cc8e801113ab26db780ba5655b01cd36a5722d458a00 not found: ID does not exist"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.457415 4840 scope.go:117] "RemoveContainer" containerID="f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"
Dec 09 18:26:07 crc kubenswrapper[4840]: E1209 18:26:07.457755 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf\": container with ID starting with f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf not found: ID does not exist" containerID="f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.457868 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf"} err="failed to get container status \"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf\": rpc error: code = NotFound desc = could not find container \"f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf\": container with ID starting with f3849dff3050801088440bce2d6dc0762b910f1aab9b8c86c3fd37c5c07187bf not found: ID does not exist"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.458022 4840 scope.go:117] "RemoveContainer" containerID="518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e"
Dec 09 18:26:07 crc kubenswrapper[4840]: E1209 18:26:07.458492 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e\": container with ID starting with 518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e not found: ID does not exist" containerID="518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e"
Dec 09 18:26:07 crc kubenswrapper[4840]: I1209 18:26:07.458566 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e"} err="failed to get container status \"518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e\": rpc error: code = NotFound desc = could not find container \"518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e\": container with ID starting with 518bdae5db8d355fda866bdf04e096b78a98f55ff298a135de634ee676129d7e not found: ID does not exist"
Dec 09 18:26:08 crc kubenswrapper[4840]: I1209 18:26:08.624422 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" path="/var/lib/kubelet/pods/d1f70057-6e36-41eb-8a78-b4badbea394e/volumes"
Dec 09 18:26:17 crc kubenswrapper[4840]: I1209 18:26:17.133772 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-648c77c5bd-sjprw_e0cfa500-d9f5-41c2-a215-661d8d8976cf/kube-rbac-proxy/0.log"
Dec 09 18:26:17 crc kubenswrapper[4840]:
I1209 18:26:17.157318 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-648c77c5bd-sjprw_e0cfa500-d9f5-41c2-a215-661d8d8976cf/manager/0.log" Dec 09 18:26:18 crc kubenswrapper[4840]: E1209 18:26:18.616112 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:26:18 crc kubenswrapper[4840]: E1209 18:26:18.616223 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:26:30 crc kubenswrapper[4840]: E1209 18:26:30.614606 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:26:31 crc kubenswrapper[4840]: I1209 18:26:31.142467 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-6zzjs_26db22e3-364b-4813-878c-ef0d99a342e8/kube-rbac-proxy/0.log" Dec 09 18:26:31 crc kubenswrapper[4840]: I1209 18:26:31.174917 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-6zzjs_26db22e3-364b-4813-878c-ef0d99a342e8/controller/0.log" Dec 09 18:26:31 crc kubenswrapper[4840]: I1209 18:26:31.345225 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-frr-files/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.155694 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-frr-files/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.175692 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-metrics/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.179131 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-reloader/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.182808 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-reloader/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.337328 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-frr-files/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.380020 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-metrics/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.394272 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-metrics/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.411667 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-reloader/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.584248 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-frr-files/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.597152 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-metrics/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.604861 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/cp-reloader/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: E1209 18:26:32.609994 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.629340 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/controller/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.808397 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/kube-rbac-proxy/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.813775 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/frr-metrics/0.log" Dec 09 18:26:32 crc kubenswrapper[4840]: I1209 18:26:32.838282 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/kube-rbac-proxy-frr/0.log" Dec 09 18:26:33 crc kubenswrapper[4840]: I1209 18:26:33.032556 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/reloader/0.log" Dec 09 18:26:33 crc kubenswrapper[4840]: I1209 18:26:33.070153 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-n7s2z_b871bd33-2669-454b-80ce-fd914f836d1d/frr-k8s-webhook-server/0.log" Dec 09 18:26:33 crc kubenswrapper[4840]: I1209 18:26:33.281271 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5fff64b888-svqsz_c3eb49b5-e79c-4d7f-8395-18217dbcc4a9/manager/0.log" Dec 09 18:26:34 crc kubenswrapper[4840]: I1209 18:26:34.121872 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-f9dc74597-72bpz_8c103b24-d2e6-413d-a074-60609d33c8fd/webhook-server/0.log" Dec 09 18:26:34 crc kubenswrapper[4840]: I1209 18:26:34.319041 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-h879g_eb375d89-155d-44fb-ad5b-f9cca1276898/frr/0.log" Dec 09 18:26:34 crc kubenswrapper[4840]: I1209 18:26:34.370372 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-9bdk4_35e4931f-7227-403e-aaf9-2426fdef84d8/kube-rbac-proxy/0.log" Dec 09 18:26:34 crc kubenswrapper[4840]: I1209 18:26:34.742164 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-9bdk4_35e4931f-7227-403e-aaf9-2426fdef84d8/speaker/0.log" Dec 09 18:26:45 crc kubenswrapper[4840]: E1209 18:26:45.611419 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:26:47 crc kubenswrapper[4840]: E1209 18:26:47.610132 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:26:48 crc kubenswrapper[4840]: I1209 18:26:48.268656 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/util/0.log" Dec 09 18:26:48 crc kubenswrapper[4840]: I1209 18:26:48.519327 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/pull/0.log" Dec 09 18:26:48 crc kubenswrapper[4840]: I1209 18:26:48.546297 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/util/0.log" Dec 09 18:26:48 crc kubenswrapper[4840]: I1209 18:26:48.583704 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/pull/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.576863 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/util/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.698720 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/extract/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.714252 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fx9rxj_0b65d3b0-1fc6-4313-8f8b-c9216de1f2cb/pull/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.773583 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/util/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.967985 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/util/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 
18:26:49.982197 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/pull/0.log" Dec 09 18:26:49 crc kubenswrapper[4840]: I1209 18:26:49.994948 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/pull/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.220222 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/pull/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.248025 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/extract/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.248417 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210f9h82_a089e1ce-d2c8-459a-82bf-cdf3dfc9a8d6/util/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.426943 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/util/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.601679 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/pull/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.651913 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/util/0.log" Dec 09 18:26:50 crc kubenswrapper[4840]: I1209 18:26:50.671822 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/pull/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.436077 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/util/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.549098 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/extract/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.586802 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c148wkc_9264f055-61da-418d-8dad-4b0c00694797/pull/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.632957 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/util/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.778627 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/util/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.795726 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/pull/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.832288 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/pull/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.955149 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/util/0.log" Dec 09 18:26:51 crc kubenswrapper[4840]: I1209 18:26:51.998859 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/extract/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.036241 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83djhzg_9310120d-a137-4eed-aaf9-e0d4dc85376b/pull/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.043639 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-utilities/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.297083 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-utilities/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.303024 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-content/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.329017 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-content/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.463611 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-content/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.490447 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/extract-utilities/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.554998 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-utilities/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.781914 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bqzvr_c1a67e7b-6b6f-4295-893f-43a6225efa13/registry-server/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.841325 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-content/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.846505 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-utilities/0.log" Dec 09 18:26:52 crc kubenswrapper[4840]: I1209 18:26:52.850386 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-content/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.078955 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-content/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.086762 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/extract-utilities/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.091315 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-jwf2s_0448e5c8-5dda-4bb9-a501-b76890d0bf29/marketplace-operator/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.154313 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kn5mq_8be887e1-067d-4f90-ba57-c335c0cc3346/registry-server/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.269602 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-utilities/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.451042 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-content/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.451471 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-utilities/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.508462 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-content/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.673795 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-content/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.758619 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/extract-utilities/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.781767 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-utilities/0.log" Dec 09 18:26:53 crc kubenswrapper[4840]: I1209 18:26:53.900232 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qpk4p_fffa1314-10f7-4218-9351-34f74565e0b9/registry-server/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.022288 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-utilities/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.053321 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-content/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.064332 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-content/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.217554 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-content/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.236382 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/extract-utilities/0.log" Dec 09 18:26:54 crc kubenswrapper[4840]: I1209 18:26:54.951326 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g6b8g_43bf6f9b-4624-4f32-828e-1ad2b7de2aa7/registry-server/0.log" Dec 09 18:26:57 crc kubenswrapper[4840]: E1209 18:26:57.610662 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:27:02 crc kubenswrapper[4840]: E1209 18:27:02.610379 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:27:04 crc kubenswrapper[4840]: I1209 18:27:04.036390 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:27:04 crc kubenswrapper[4840]: I1209 18:27:04.036715 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:27:09 crc kubenswrapper[4840]: I1209 18:27:09.413470 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-qdpq8_251a8643-2e5b-4ac3-997d-b275bb1f6d25/prometheus-operator/0.log" Dec 09 18:27:10 crc kubenswrapper[4840]: I1209 18:27:10.360434 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7c9fdb89-sks2j_1e62864a-4c1d-4543-a379-ce30d0e68ea6/prometheus-operator-admission-webhook/0.log" Dec 09 18:27:10 crc kubenswrapper[4840]: I1209 18:27:10.363559 4840 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7c9fdb89-vnvvz_c9b0db50-828e-4e51-9ea5-099a055f6c0f/prometheus-operator-admission-webhook/0.log" Dec 09 18:27:10 crc kubenswrapper[4840]: I1209 18:27:10.538051 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-sqt7j_ddf0a1fc-865e-44d5-b4e9-f470cbcbc031/operator/0.log" Dec 09 18:27:10 crc kubenswrapper[4840]: I1209 18:27:10.558135 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-wc2mq_06d88094-c4e9-4872-a546-6c42f9626286/perses-operator/0.log" Dec 09 18:27:12 crc kubenswrapper[4840]: E1209 18:27:12.615055 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:27:14 crc kubenswrapper[4840]: E1209 18:27:14.619031 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:27:24 crc kubenswrapper[4840]: I1209 18:27:24.832672 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-648c77c5bd-sjprw_e0cfa500-d9f5-41c2-a215-661d8d8976cf/kube-rbac-proxy/0.log" Dec 09 18:27:24 crc kubenswrapper[4840]: I1209 18:27:24.873169 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-648c77c5bd-sjprw_e0cfa500-d9f5-41c2-a215-661d8d8976cf/manager/0.log" Dec 09 18:27:26 crc kubenswrapper[4840]: E1209 18:27:26.612312 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:27:29 crc kubenswrapper[4840]: E1209 18:27:29.610837 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:27:34 crc kubenswrapper[4840]: I1209 18:27:34.036124 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:27:34 crc kubenswrapper[4840]: I1209 18:27:34.036605 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Dec 09 18:27:38 crc kubenswrapper[4840]: E1209 18:27:38.609717 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:27:40 crc kubenswrapper[4840]: E1209 18:27:40.610276 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:27:50 crc kubenswrapper[4840]: E1209 18:27:50.612341 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:27:55 crc kubenswrapper[4840]: E1209 18:27:55.611116 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.036499 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.037309 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.037383 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.038597 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.038691 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478" gracePeriod=600 Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.491324 4840 generic.go:334] "Generic (PLEG): container 
finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478" exitCode=0 Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.491366 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478"} Dec 09 18:28:04 crc kubenswrapper[4840]: I1209 18:28:04.491399 4840 scope.go:117] "RemoveContainer" containerID="3ddb8d6159023b310af8ae056a97a28d60f96452b621dacbfbd0f8a3abcc8210" Dec 09 18:28:05 crc kubenswrapper[4840]: I1209 18:28:05.500427 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerStarted","Data":"b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"} Dec 09 18:28:05 crc kubenswrapper[4840]: E1209 18:28:05.611850 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:28:06 crc kubenswrapper[4840]: E1209 18:28:06.613310 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:16 crc kubenswrapper[4840]: E1209 18:28:16.612905 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:28:18 crc kubenswrapper[4840]: E1209 18:28:18.611798 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:31 crc kubenswrapper[4840]: E1209 18:28:31.613983 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:28:32 crc kubenswrapper[4840]: E1209 18:28:32.617613 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:44 crc kubenswrapper[4840]: I1209 18:28:44.630479 4840 provider.go:102] Refreshing 
cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:28:44 crc kubenswrapper[4840]: E1209 18:28:44.833156 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:28:44 crc kubenswrapper[4840]: E1209 18:28:44.833231 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:28:44 crc kubenswrapper[4840]: E1209 18:28:44.833447 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{}
,StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:28:44 crc kubenswrapper[4840]: E1209 18:28:44.834782 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:28:46 crc kubenswrapper[4840]: E1209 18:28:46.612037 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:56 crc kubenswrapper[4840]: E1209 18:28:56.611204 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:28:57 crc kubenswrapper[4840]: E1209 18:28:57.610373 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:28:58 crc kubenswrapper[4840]: I1209 18:28:58.093072 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerID="e850b1969e32236f5cf584f245aa47c22e976a16052c9f197e831041aa1fecd3" exitCode=0 Dec 09 18:28:58 crc kubenswrapper[4840]: I1209 18:28:58.093156 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lv92q/must-gather-2pps9" event={"ID":"fe16bca5-1a60-4cb6-a11c-474f241a53fa","Type":"ContainerDied","Data":"e850b1969e32236f5cf584f245aa47c22e976a16052c9f197e831041aa1fecd3"} Dec 09 18:28:58 crc kubenswrapper[4840]: I1209 18:28:58.094246 4840 scope.go:117] "RemoveContainer" containerID="e850b1969e32236f5cf584f245aa47c22e976a16052c9f197e831041aa1fecd3" Dec 09 18:28:58 crc kubenswrapper[4840]: I1209 18:28:58.320724 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lv92q_must-gather-2pps9_fe16bca5-1a60-4cb6-a11c-474f241a53fa/gather/0.log" Dec 09 18:29:06 crc kubenswrapper[4840]: I1209 18:29:06.737979 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lv92q/must-gather-2pps9"] Dec 09 18:29:06 crc kubenswrapper[4840]: I1209 
18:29:06.738845 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-lv92q/must-gather-2pps9" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="copy" containerID="cri-o://1425747da36769af2705b195370a455f13f6b6fef59a00622ca1c28e672d0f59" gracePeriod=2 Dec 09 18:29:06 crc kubenswrapper[4840]: I1209 18:29:06.754805 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lv92q/must-gather-2pps9"] Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.183930 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lv92q_must-gather-2pps9_fe16bca5-1a60-4cb6-a11c-474f241a53fa/copy/0.log" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.184559 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerID="1425747da36769af2705b195370a455f13f6b6fef59a00622ca1c28e672d0f59" exitCode=143 Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.643391 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lv92q_must-gather-2pps9_fe16bca5-1a60-4cb6-a11c-474f241a53fa/copy/0.log" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.644206 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/must-gather-2pps9" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.739601 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output\") pod \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.739672 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnhqx\" (UniqueName: \"kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx\") pod \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\" (UID: \"fe16bca5-1a60-4cb6-a11c-474f241a53fa\") " Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.755565 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx" (OuterVolumeSpecName: "kube-api-access-wnhqx") pod "fe16bca5-1a60-4cb6-a11c-474f241a53fa" (UID: "fe16bca5-1a60-4cb6-a11c-474f241a53fa"). InnerVolumeSpecName "kube-api-access-wnhqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.842204 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnhqx\" (UniqueName: \"kubernetes.io/projected/fe16bca5-1a60-4cb6-a11c-474f241a53fa-kube-api-access-wnhqx\") on node \"crc\" DevicePath \"\"" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.915917 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "fe16bca5-1a60-4cb6-a11c-474f241a53fa" (UID: "fe16bca5-1a60-4cb6-a11c-474f241a53fa"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:29:07 crc kubenswrapper[4840]: I1209 18:29:07.944567 4840 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fe16bca5-1a60-4cb6-a11c-474f241a53fa-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 09 18:29:08 crc kubenswrapper[4840]: I1209 18:29:08.198007 4840 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lv92q_must-gather-2pps9_fe16bca5-1a60-4cb6-a11c-474f241a53fa/copy/0.log" Dec 09 18:29:08 crc kubenswrapper[4840]: I1209 18:29:08.198786 4840 scope.go:117] "RemoveContainer" containerID="1425747da36769af2705b195370a455f13f6b6fef59a00622ca1c28e672d0f59" Dec 09 18:29:08 crc kubenswrapper[4840]: I1209 18:29:08.198814 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lv92q/must-gather-2pps9" Dec 09 18:29:08 crc kubenswrapper[4840]: I1209 18:29:08.221027 4840 scope.go:117] "RemoveContainer" containerID="e850b1969e32236f5cf584f245aa47c22e976a16052c9f197e831041aa1fecd3" Dec 09 18:29:08 crc kubenswrapper[4840]: I1209 18:29:08.622572 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" path="/var/lib/kubelet/pods/fe16bca5-1a60-4cb6-a11c-474f241a53fa/volumes" Dec 09 18:29:11 crc kubenswrapper[4840]: E1209 18:29:11.611198 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:29:12 crc kubenswrapper[4840]: E1209 18:29:12.611301 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:29:22 crc kubenswrapper[4840]: E1209 18:29:22.611377 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.100707 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-42z48"] Dec 09 18:29:23 crc kubenswrapper[4840]: E1209 18:29:23.101578 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="gather" Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.101602 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="gather" Dec 09 18:29:23 crc kubenswrapper[4840]: E1209 18:29:23.101631 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="copy" Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.101639 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="copy" Dec 09 18:29:23 crc kubenswrapper[4840]: 
E1209 18:29:23.101659 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="extract-utilities"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.101668 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="extract-utilities"
Dec 09 18:29:23 crc kubenswrapper[4840]: E1209 18:29:23.101695 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="extract-content"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.101703 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="extract-content"
Dec 09 18:29:23 crc kubenswrapper[4840]: E1209 18:29:23.101722 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="registry-server"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.101729 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="registry-server"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.108187 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="gather"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.108233 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1f70057-6e36-41eb-8a78-b4badbea394e" containerName="registry-server"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.108243 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe16bca5-1a60-4cb6-a11c-474f241a53fa" containerName="copy"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.110083 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.121348 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-42z48"]
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.191186 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.191600 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.191890 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29nwc\" (UniqueName: \"kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.293929 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.294058 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.294204 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29nwc\" (UniqueName: \"kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.294487 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.294512 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.321317 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29nwc\" (UniqueName: \"kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc\") pod \"community-operators-42z48\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") " pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:23 crc kubenswrapper[4840]: I1209 18:29:23.438195 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:24 crc kubenswrapper[4840]: I1209 18:29:24.005817 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-42z48"]
Dec 09 18:29:24 crc kubenswrapper[4840]: I1209 18:29:24.381049 4840 generic.go:334] "Generic (PLEG): container finished" podID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerID="09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2" exitCode=0
Dec 09 18:29:24 crc kubenswrapper[4840]: I1209 18:29:24.381604 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerDied","Data":"09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2"}
Dec 09 18:29:24 crc kubenswrapper[4840]: I1209 18:29:24.383087 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerStarted","Data":"96cafde102d538a03f330378e322973583523445b0cae81e70f147a791a962fa"}
Dec 09 18:29:25 crc kubenswrapper[4840]: I1209 18:29:25.393819 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerStarted","Data":"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"}
Dec 09 18:29:26 crc kubenswrapper[4840]: I1209 18:29:26.406720 4840 generic.go:334] "Generic (PLEG): container finished" podID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerID="a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1" exitCode=0
Dec 09 18:29:26 crc kubenswrapper[4840]: I1209 18:29:26.407085 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerDied","Data":"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"}
Dec 09 18:29:26 crc kubenswrapper[4840]: E1209 18:29:26.746250 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:29:26 crc kubenswrapper[4840]: E1209 18:29:26.746320 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 18:29:26 crc kubenswrapper[4840]: E1209 18:29:26.746465 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 18:29:26 crc kubenswrapper[4840]: E1209 18:29:26.748417 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:29:27 crc kubenswrapper[4840]: I1209 18:29:27.417047 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerStarted","Data":"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"}
Dec 09 18:29:27 crc kubenswrapper[4840]: I1209 18:29:27.441911 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-42z48" podStartSLOduration=1.9071586969999998 podStartE2EDuration="4.441894177s" podCreationTimestamp="2025-12-09 18:29:23 +0000 UTC" firstStartedPulling="2025-12-09 18:29:24.387982458 +0000 UTC m=+5550.379093091" lastFinishedPulling="2025-12-09 18:29:26.922717938 +0000 UTC m=+5552.913828571" observedRunningTime="2025-12-09 18:29:27.430826856 +0000 UTC m=+5553.421937489" watchObservedRunningTime="2025-12-09 18:29:27.441894177 +0000 UTC m=+5553.433004810"
Dec 09 18:29:33 crc kubenswrapper[4840]: I1209 18:29:33.438473 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:33 crc kubenswrapper[4840]: I1209 18:29:33.440197 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:33 crc kubenswrapper[4840]: I1209 18:29:33.517347 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:34 crc kubenswrapper[4840]: I1209 18:29:34.541394 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:34 crc kubenswrapper[4840]: I1209 18:29:34.606904 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-42z48"]
Dec 09 18:29:36 crc kubenswrapper[4840]: I1209 18:29:36.515286 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-42z48" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="registry-server" containerID="cri-o://42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5" gracePeriod=2
Dec 09 18:29:36 crc kubenswrapper[4840]: E1209 18:29:36.617652 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.059070 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.104589 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content\") pod \"a41f4a94-635c-438f-a828-9ac74d419ddf\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") "
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.104783 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities\") pod \"a41f4a94-635c-438f-a828-9ac74d419ddf\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") "
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.104994 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29nwc\" (UniqueName: \"kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc\") pod \"a41f4a94-635c-438f-a828-9ac74d419ddf\" (UID: \"a41f4a94-635c-438f-a828-9ac74d419ddf\") "
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.107095 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities" (OuterVolumeSpecName: "utilities") pod "a41f4a94-635c-438f-a828-9ac74d419ddf" (UID: "a41f4a94-635c-438f-a828-9ac74d419ddf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.113602 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc" (OuterVolumeSpecName: "kube-api-access-29nwc") pod "a41f4a94-635c-438f-a828-9ac74d419ddf" (UID: "a41f4a94-635c-438f-a828-9ac74d419ddf"). InnerVolumeSpecName "kube-api-access-29nwc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.177372 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a41f4a94-635c-438f-a828-9ac74d419ddf" (UID: "a41f4a94-635c-438f-a828-9ac74d419ddf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.207979 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.208025 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41f4a94-635c-438f-a828-9ac74d419ddf-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.208040 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29nwc\" (UniqueName: \"kubernetes.io/projected/a41f4a94-635c-438f-a828-9ac74d419ddf-kube-api-access-29nwc\") on node \"crc\" DevicePath \"\""
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.528363 4840 generic.go:334] "Generic (PLEG): container finished" podID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerID="42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5" exitCode=0
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.528412 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerDied","Data":"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"}
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.528426 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42z48"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.528447 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42z48" event={"ID":"a41f4a94-635c-438f-a828-9ac74d419ddf","Type":"ContainerDied","Data":"96cafde102d538a03f330378e322973583523445b0cae81e70f147a791a962fa"}
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.528468 4840 scope.go:117] "RemoveContainer" containerID="42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.563410 4840 scope.go:117] "RemoveContainer" containerID="a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.570627 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-42z48"]
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.594932 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-42z48"]
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.602935 4840 scope.go:117] "RemoveContainer" containerID="09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.653256 4840 scope.go:117] "RemoveContainer" containerID="42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"
Dec 09 18:29:37 crc kubenswrapper[4840]: E1209 18:29:37.653649 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5\": container with ID starting with 42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5 not found: ID does not exist" containerID="42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.653693 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5"} err="failed to get container status \"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5\": rpc error: code = NotFound desc = could not find container \"42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5\": container with ID starting with 42f60a92138aae45a30d0e80c28f51fab3957998bc15a67d2dda64016b5403b5 not found: ID does not exist"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.653714 4840 scope.go:117] "RemoveContainer" containerID="a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"
Dec 09 18:29:37 crc kubenswrapper[4840]: E1209 18:29:37.653996 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1\": container with ID starting with a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1 not found: ID does not exist" containerID="a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.654025 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1"} err="failed to get container status \"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1\": rpc error: code = NotFound desc = could not find container \"a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1\": container with ID starting with a4d467612f75dc9df53c475f292ad534d452ca29017dfbf157b18799ea831bf1 not found: ID does not exist"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.654042 4840 scope.go:117] "RemoveContainer" containerID="09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2"
Dec 09 18:29:37 crc kubenswrapper[4840]: E1209 18:29:37.654395 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2\": container with ID starting with 09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2 not found: ID does not exist" containerID="09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2"
Dec 09 18:29:37 crc kubenswrapper[4840]: I1209 18:29:37.654431 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2"} err="failed to get container status \"09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2\": rpc error: code = NotFound desc = could not find container \"09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2\": container with ID starting with 09941ead1536aec22e50a8cfe95a55c910021487b833150bf44a7519a1cb6fd2 not found: ID does not exist"
Dec 09 18:29:38 crc kubenswrapper[4840]: I1209 18:29:38.623850 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" path="/var/lib/kubelet/pods/a41f4a94-635c-438f-a828-9ac74d419ddf/volumes"
Dec 09 18:29:41 crc kubenswrapper[4840]: E1209 18:29:41.615834 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:29:51 crc kubenswrapper[4840]: E1209 18:29:51.611077 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:29:55 crc kubenswrapper[4840]: E1209 18:29:55.614046 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.161271 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"]
Dec 09 18:30:00 crc kubenswrapper[4840]: E1209 18:30:00.162397 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="registry-server"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.162415 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="registry-server"
Dec 09 18:30:00 crc kubenswrapper[4840]: E1209 18:30:00.162443 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="extract-utilities"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.162452 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="extract-utilities"
Dec 09 18:30:00 crc kubenswrapper[4840]: E1209 18:30:00.162502 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="extract-content"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.162516 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="extract-content"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.162797 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="a41f4a94-635c-438f-a828-9ac74d419ddf" containerName="registry-server"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.163993 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.166694 4840 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.167038 4840 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.238036 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"]
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.267926 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.268422 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2s9k\" (UniqueName: \"kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.268548 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.370330 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2s9k\" (UniqueName: \"kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.370424 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.370517 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.372200 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.380024 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.388730 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2s9k\" (UniqueName: \"kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k\") pod \"collect-profiles-29421750-7hvrx\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:00 crc kubenswrapper[4840]: I1209 18:30:00.541765 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:01 crc kubenswrapper[4840]: I1209 18:30:01.058500 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"]
Dec 09 18:30:01 crc kubenswrapper[4840]: I1209 18:30:01.842189 4840 generic.go:334] "Generic (PLEG): container finished" podID="9f0c5b30-1a1a-431c-90a2-f9495d2e4891" containerID="a79c9842ab827c59db2c2d1f7c7e631bb61ed50c86914a8651ea28f1328370b1" exitCode=0
Dec 09 18:30:01 crc kubenswrapper[4840]: I1209 18:30:01.842268 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx" event={"ID":"9f0c5b30-1a1a-431c-90a2-f9495d2e4891","Type":"ContainerDied","Data":"a79c9842ab827c59db2c2d1f7c7e631bb61ed50c86914a8651ea28f1328370b1"}
Dec 09 18:30:01 crc kubenswrapper[4840]: I1209 18:30:01.842419 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx" event={"ID":"9f0c5b30-1a1a-431c-90a2-f9495d2e4891","Type":"ContainerStarted","Data":"d87c463b221b827476be8574dc7dcda7c6c65180e1126cdf333bca4c06683b4c"}
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.357383 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.441423 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume\") pod \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") "
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.441669 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume\") pod \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") "
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.441734 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2s9k\" (UniqueName: \"kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k\") pod \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\" (UID: \"9f0c5b30-1a1a-431c-90a2-f9495d2e4891\") "
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.442834 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f0c5b30-1a1a-431c-90a2-f9495d2e4891" (UID: "9f0c5b30-1a1a-431c-90a2-f9495d2e4891"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.447518 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k" (OuterVolumeSpecName: "kube-api-access-s2s9k") pod "9f0c5b30-1a1a-431c-90a2-f9495d2e4891" (UID: "9f0c5b30-1a1a-431c-90a2-f9495d2e4891"). InnerVolumeSpecName "kube-api-access-s2s9k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.449177 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f0c5b30-1a1a-431c-90a2-f9495d2e4891" (UID: "9f0c5b30-1a1a-431c-90a2-f9495d2e4891"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.544436 4840 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.544498 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2s9k\" (UniqueName: \"kubernetes.io/projected/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-kube-api-access-s2s9k\") on node \"crc\" DevicePath \"\""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.544517 4840 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f0c5b30-1a1a-431c-90a2-f9495d2e4891-config-volume\") on node \"crc\" DevicePath \"\""
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.868033 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx" event={"ID":"9f0c5b30-1a1a-431c-90a2-f9495d2e4891","Type":"ContainerDied","Data":"d87c463b221b827476be8574dc7dcda7c6c65180e1126cdf333bca4c06683b4c"}
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.868395 4840 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d87c463b221b827476be8574dc7dcda7c6c65180e1126cdf333bca4c06683b4c"
Dec 09 18:30:03 crc kubenswrapper[4840]: I1209 18:30:03.868257 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421750-7hvrx"
Dec 09 18:30:04 crc kubenswrapper[4840]: I1209 18:30:04.465285 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"]
Dec 09 18:30:04 crc kubenswrapper[4840]: I1209 18:30:04.477143 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421705-6c2nn"]
Dec 09 18:30:04 crc kubenswrapper[4840]: I1209 18:30:04.626076 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7081818-31e8-4020-99eb-f658ed9adc9f" path="/var/lib/kubelet/pods/b7081818-31e8-4020-99eb-f658ed9adc9f/volumes"
Dec 09 18:30:05 crc kubenswrapper[4840]: E1209 18:30:05.610124 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:30:06 crc kubenswrapper[4840]: E1209 18:30:06.611566 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:30:13 crc kubenswrapper[4840]: I1209 18:30:13.957265 4840 scope.go:117] "RemoveContainer" containerID="30099bf8ce7949efa21639a62d1a1e5ff585993db4000056f8f156adb6cb7499"
Dec 09 18:30:19 crc kubenswrapper[4840]: E1209 18:30:19.611953 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:30:20 crc kubenswrapper[4840]: E1209 18:30:20.611059 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:30:34 crc kubenswrapper[4840]: I1209 18:30:34.037100 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:30:34 crc kubenswrapper[4840]: I1209 18:30:34.037796 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:30:34 crc kubenswrapper[4840]: E1209 18:30:34.627857 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:30:34 crc kubenswrapper[4840]: E1209 18:30:34.645686 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:30:49 crc kubenswrapper[4840]: E1209 18:30:49.612198 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:30:49 crc kubenswrapper[4840]: E1209 18:30:49.612269 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:01 crc kubenswrapper[4840]: E1209 18:31:01.611727 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:31:01 crc kubenswrapper[4840]: E1209 18:31:01.612644 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:04 crc kubenswrapper[4840]: I1209 18:31:04.036530 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:31:04 crc kubenswrapper[4840]: I1209 18:31:04.036908 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:31:14 crc kubenswrapper[4840]: E1209 18:31:14.621544 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:16 crc kubenswrapper[4840]: E1209 18:31:16.611512 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:31:28 crc kubenswrapper[4840]: E1209 18:31:28.611076 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:30 crc kubenswrapper[4840]: E1209 18:31:30.613118 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.036756 4840 patch_prober.go:28] interesting pod/machine-config-daemon-kr6l2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.038088 4840 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.038213 4840 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.039195 4840 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"} pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.039368 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" containerName="machine-config-daemon" containerID="cri-o://b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" gracePeriod=600
Dec 09 18:31:34 crc kubenswrapper[4840]: E1209 18:31:34.164029 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.928821 4840 generic.go:334] "Generic (PLEG): container finished" podID="fe6d320b-3a64-4724-93af-500d38c77974" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" exitCode=0
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.928930 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" event={"ID":"fe6d320b-3a64-4724-93af-500d38c77974","Type":"ContainerDied","Data":"b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"}
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.929418 4840 scope.go:117] "RemoveContainer" containerID="d8c10b7f78d31263d0a8a8b062489523e4cf7492edcfad92bd6b1f456eefb478"
Dec 09 18:31:34 crc kubenswrapper[4840]: I1209 18:31:34.930370 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:31:34 crc kubenswrapper[4840]: E1209 18:31:34.930951 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:31:40 crc kubenswrapper[4840]: E1209 18:31:40.612673 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:42 crc kubenswrapper[4840]: E1209 18:31:42.611182 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:31:46 crc kubenswrapper[4840]: I1209 18:31:46.609782 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:31:46 crc kubenswrapper[4840]: E1209 18:31:46.610775 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:31:51 crc kubenswrapper[4840]: E1209 18:31:51.610187 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:31:53 crc kubenswrapper[4840]: E1209 18:31:53.610757 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:00 crc kubenswrapper[4840]: I1209 18:32:00.611152 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:32:00 crc kubenswrapper[4840]: E1209 18:32:00.615125 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:32:04 crc kubenswrapper[4840]: E1209 18:32:04.617362 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:04 crc kubenswrapper[4840]: E1209 18:32:04.617426 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:32:14 crc kubenswrapper[4840]: I1209 18:32:14.071306 4840 scope.go:117] "RemoveContainer" containerID="d46bba17ef57333f0695434301091e92c4689cf8d3a448eefbdf16e4f5b27089"
Dec 09 18:32:14 crc kubenswrapper[4840]: I1209 18:32:14.103952 4840 scope.go:117] "RemoveContainer" containerID="715b51e4d3858afc8ed84d3f0be72dd7b91afc9266270ac60aefe81b468971ff"
Dec 09 18:32:14 crc kubenswrapper[4840]: I1209 18:32:14.172121 4840 scope.go:117] "RemoveContainer" containerID="ffed1b9e481fba105a292875280870f0830a4b053cd0335ad152a699ce86e3e3"
Dec 09 18:32:15 crc kubenswrapper[4840]: I1209 18:32:15.609040 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:32:15 crc kubenswrapper[4840]: E1209 18:32:15.610563 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:32:17 crc kubenswrapper[4840]: E1209 18:32:17.610822 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:32:19 crc kubenswrapper[4840]: E1209 18:32:19.611584 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:28 crc kubenswrapper[4840]: I1209 18:32:28.609688 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:32:28 crc kubenswrapper[4840]: E1209 18:32:28.610827 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:32:29 crc kubenswrapper[4840]: E1209 18:32:29.611375 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:32:30 crc kubenswrapper[4840]: E1209 18:32:30.610952 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:43 crc kubenswrapper[4840]: I1209 18:32:43.609140 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:32:43 crc kubenswrapper[4840]: E1209 18:32:43.610042 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:32:43 crc kubenswrapper[4840]: E1209 18:32:43.611164 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:43 crc kubenswrapper[4840]: E1209 18:32:43.611433 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:32:56 crc kubenswrapper[4840]: E1209 18:32:56.610207 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:32:57 crc kubenswrapper[4840]: I1209 18:32:57.608623 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:32:57 crc kubenswrapper[4840]: E1209 18:32:57.609350 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:32:58 crc kubenswrapper[4840]: E1209 18:32:58.610091 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:33:08 crc kubenswrapper[4840]: I1209 18:33:08.608844 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:33:08 crc kubenswrapper[4840]: E1209 18:33:08.609635 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:33:10 crc kubenswrapper[4840]: E1209 18:33:10.612220 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:33:12 crc kubenswrapper[4840]: E1209 18:33:12.610709 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:33:19 crc kubenswrapper[4840]: I1209 18:33:19.608676 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:33:19 crc kubenswrapper[4840]: E1209 18:33:19.609845 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:33:25 crc kubenswrapper[4840]: E1209 18:33:25.611555 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:33:26 crc kubenswrapper[4840]: E1209 18:33:26.611051 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:33:34 crc kubenswrapper[4840]: I1209 18:33:34.617884 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:33:34 crc kubenswrapper[4840]: E1209 18:33:34.619024 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.367018 4840 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"]
Dec 09 18:33:36 crc kubenswrapper[4840]: E1209 18:33:36.368526 4840 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0c5b30-1a1a-431c-90a2-f9495d2e4891" containerName="collect-profiles"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.368544 4840 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0c5b30-1a1a-431c-90a2-f9495d2e4891" containerName="collect-profiles"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.368801 4840 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f0c5b30-1a1a-431c-90a2-f9495d2e4891" containerName="collect-profiles"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.370929 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.382147 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"]
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.472296 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.472520 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.472787 4840 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxx5r\" (UniqueName: \"kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.575251 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxx5r\" (UniqueName: \"kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.575384 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.575465 4840 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.576068 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.576364 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.596735 4840 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxx5r\" (UniqueName: \"kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r\") pod \"redhat-operators-7w9nw\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:36 crc kubenswrapper[4840]: I1209 18:33:36.697593 4840 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7w9nw"
Dec 09 18:33:37 crc kubenswrapper[4840]: I1209 18:33:37.221999 4840 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"]
Dec 09 18:33:37 crc kubenswrapper[4840]: I1209 18:33:37.258213 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerStarted","Data":"f93181a803a9d2774d3597db0e377a782fa255863e7a13e4bb184f8a73cbbcfd"}
Dec 09 18:33:37 crc kubenswrapper[4840]: E1209 18:33:37.609911 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
Dec 09 18:33:37 crc kubenswrapper[4840]: E1209 18:33:37.610041 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:33:38 crc kubenswrapper[4840]: I1209 18:33:38.272374 4840 generic.go:334] "Generic (PLEG): container finished" podID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" containerID="3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9" exitCode=0
Dec 09 18:33:38 crc kubenswrapper[4840]: I1209 18:33:38.272418 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerDied","Data":"3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9"}
Dec 09 18:33:40 crc kubenswrapper[4840]: I1209 18:33:40.297672 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerStarted","Data":"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d"}
Dec 09 18:33:42 crc kubenswrapper[4840]: I1209 18:33:42.328071 4840 generic.go:334] "Generic (PLEG): container finished" podID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" containerID="fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d" exitCode=0
Dec 09 18:33:42 crc kubenswrapper[4840]: I1209 18:33:42.328135 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerDied","Data":"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d"}
Dec 09 18:33:43 crc kubenswrapper[4840]: I1209 18:33:43.340541 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw"
event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerStarted","Data":"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4"} Dec 09 18:33:43 crc kubenswrapper[4840]: I1209 18:33:43.359613 4840 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7w9nw" podStartSLOduration=2.836755303 podStartE2EDuration="7.359592091s" podCreationTimestamp="2025-12-09 18:33:36 +0000 UTC" firstStartedPulling="2025-12-09 18:33:38.275005128 +0000 UTC m=+5804.266115771" lastFinishedPulling="2025-12-09 18:33:42.797841876 +0000 UTC m=+5808.788952559" observedRunningTime="2025-12-09 18:33:43.357029429 +0000 UTC m=+5809.348140072" watchObservedRunningTime="2025-12-09 18:33:43.359592091 +0000 UTC m=+5809.350702724" Dec 09 18:33:46 crc kubenswrapper[4840]: I1209 18:33:46.698673 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:46 crc kubenswrapper[4840]: I1209 18:33:46.699039 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:47 crc kubenswrapper[4840]: I1209 18:33:47.749814 4840 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7w9nw" podUID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" containerName="registry-server" probeResult="failure" output=< Dec 09 18:33:47 crc kubenswrapper[4840]: timeout: failed to connect service ":50051" within 1s Dec 09 18:33:47 crc kubenswrapper[4840]: > Dec 09 18:33:48 crc kubenswrapper[4840]: I1209 18:33:48.608802 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" Dec 09 18:33:48 crc kubenswrapper[4840]: E1209 18:33:48.609236 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:33:50 crc kubenswrapper[4840]: E1209 18:33:50.611711 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:33:51 crc kubenswrapper[4840]: I1209 18:33:51.617416 4840 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 18:33:51 crc kubenswrapper[4840]: E1209 18:33:51.713926 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:33:51 crc kubenswrapper[4840]: E1209 18:33:51.714055 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 09 18:33:51 crc kubenswrapper[4840]: E1209 18:33:51.714293 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wtfrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-trhsb_openstack(5124c5e9-268a-473a-abe6-b5d1af073124): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 18:33:51 crc kubenswrapper[4840]: E1209 18:33:51.716182 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:33:56 crc kubenswrapper[4840]: I1209 18:33:56.774303 4840 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:56 crc kubenswrapper[4840]: I1209 18:33:56.861366 4840 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:57 crc kubenswrapper[4840]: I1209 18:33:57.024868 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"] Dec 09 18:33:58 crc kubenswrapper[4840]: I1209 18:33:58.507783 4840 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7w9nw" podUID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" containerName="registry-server" containerID="cri-o://14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4" gracePeriod=2 Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.091458 4840 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.257890 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content\") pod \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.258101 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxx5r\" (UniqueName: \"kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r\") pod \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.258191 4840 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities\") pod \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\" (UID: \"721e27d0-ec1f-42fc-b16d-bf2650f7f978\") " Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.259522 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities" (OuterVolumeSpecName: "utilities") pod "721e27d0-ec1f-42fc-b16d-bf2650f7f978" (UID: "721e27d0-ec1f-42fc-b16d-bf2650f7f978"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.264983 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r" (OuterVolumeSpecName: "kube-api-access-rxx5r") pod "721e27d0-ec1f-42fc-b16d-bf2650f7f978" (UID: "721e27d0-ec1f-42fc-b16d-bf2650f7f978"). InnerVolumeSpecName "kube-api-access-rxx5r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.372646 4840 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxx5r\" (UniqueName: \"kubernetes.io/projected/721e27d0-ec1f-42fc-b16d-bf2650f7f978-kube-api-access-rxx5r\") on node \"crc\" DevicePath \"\"" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.372677 4840 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.381780 4840 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "721e27d0-ec1f-42fc-b16d-bf2650f7f978" (UID: "721e27d0-ec1f-42fc-b16d-bf2650f7f978"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.475059 4840 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721e27d0-ec1f-42fc-b16d-bf2650f7f978-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.517377 4840 generic.go:334] "Generic (PLEG): container finished" podID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" containerID="14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4" exitCode=0 Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.517563 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerDied","Data":"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4"} Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.517724 4840 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7w9nw" event={"ID":"721e27d0-ec1f-42fc-b16d-bf2650f7f978","Type":"ContainerDied","Data":"f93181a803a9d2774d3597db0e377a782fa255863e7a13e4bb184f8a73cbbcfd"} Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.517746 4840 scope.go:117] "RemoveContainer" containerID="14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.517634 4840 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7w9nw" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.549325 4840 scope.go:117] "RemoveContainer" containerID="fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.554453 4840 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"] Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.564301 4840 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7w9nw"] Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.576203 4840 scope.go:117] "RemoveContainer" containerID="3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.618041 4840 scope.go:117] "RemoveContainer" containerID="14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4" Dec 09 18:33:59 crc kubenswrapper[4840]: E1209 18:33:59.618594 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4\": container with ID starting with 14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4 not found: ID does not exist" containerID="14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.618637 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4"} err="failed to get container status \"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4\": rpc error: code = NotFound desc = could not find container \"14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4\": container with ID starting with 14ec3708d9c4c81a58d37e3a207fc4146a1a6b32ef144b72916c0dbe188dbbd4 not found: ID does not exist" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.618665 4840 scope.go:117] "RemoveContainer" containerID="fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d" Dec 09 18:33:59 crc kubenswrapper[4840]: E1209 18:33:59.619026 4840 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d\": container with ID starting with fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d not found: ID does not exist" containerID="fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.619059 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d"} err="failed to get container status \"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d\": rpc error: code = NotFound desc = could not find container \"fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d\": container with ID starting with fb02e91e36a3c0b686bf23d3c05a8fa0a72b325f231a52d118a8544b310b511d not found: ID does not exist" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.619079 4840 scope.go:117] "RemoveContainer" containerID="3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9" Dec 09 18:33:59 crc kubenswrapper[4840]: E1209 18:33:59.619289 4840 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9\": container with ID starting with 3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9 not found: ID does not exist" containerID="3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9" Dec 09 18:33:59 crc kubenswrapper[4840]: I1209 18:33:59.619319 4840 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9"} err="failed to get container status \"3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9\": rpc error: code = NotFound desc = could not find container \"3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9\": container with ID starting with 3a2456ee9f3240aa94c1441f9e78cf79d2ab072f59d91dc7896186e91630a2b9 not found: ID does not exist" Dec 09 18:34:00 crc kubenswrapper[4840]: I1209 18:34:00.627531 4840 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="721e27d0-ec1f-42fc-b16d-bf2650f7f978" path="/var/lib/kubelet/pods/721e27d0-ec1f-42fc-b16d-bf2650f7f978/volumes" Dec 09 18:34:01 crc kubenswrapper[4840]: E1209 18:34:01.610146 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:34:02 crc kubenswrapper[4840]: I1209 18:34:02.608938 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" Dec 09 18:34:02 crc kubenswrapper[4840]: E1209 18:34:02.609502 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:34:03 crc kubenswrapper[4840]: E1209 18:34:03.610687 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:34:14 crc kubenswrapper[4840]: E1209 18:34:14.636899 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04" Dec 09 18:34:15 crc kubenswrapper[4840]: I1209 18:34:15.609143 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" Dec 09 18:34:15 crc kubenswrapper[4840]: E1209 18:34:15.609958 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:34:16 crc kubenswrapper[4840]: E1209 18:34:16.612048 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:34:26 crc kubenswrapper[4840]: I1209 18:34:26.609448 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d" Dec 09 18:34:26 crc kubenswrapper[4840]: E1209 18:34:26.610619 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974" Dec 09 18:34:27 crc kubenswrapper[4840]: E1209 18:34:27.610833 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124" Dec 09 18:34:29 crc kubenswrapper[4840]: E1209 18:34:29.751783 4840 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:34:29 crc kubenswrapper[4840]: E1209 18:34:29.752184 4840 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 18:34:29 crc kubenswrapper[4840]: E1209 18:34:29.752376 4840 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh546hd5h5f6h65ch64fh65dh9h69h75hdfhb8h79hf9h675h5dch6fh657h5b6h574h646h9h5b8h666h687h668h676h64fh69h64ch6bhb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kg746,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9ec426c3-8fdd-42d9-9ea5-5d751112ee04): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 18:34:29 crc kubenswrapper[4840]: E1209 18:34:29.753563 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
Dec 09 18:34:38 crc kubenswrapper[4840]: I1209 18:34:38.616179 4840 scope.go:117] "RemoveContainer" containerID="b97c7acd3224d6253d3505f610d3ee48a7298dacf719a6e23ce3a5fccec1d44d"
Dec 09 18:34:38 crc kubenswrapper[4840]: E1209 18:34:38.624789 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kr6l2_openshift-machine-config-operator(fe6d320b-3a64-4724-93af-500d38c77974)\"" pod="openshift-machine-config-operator/machine-config-daemon-kr6l2" podUID="fe6d320b-3a64-4724-93af-500d38c77974"
Dec 09 18:34:41 crc kubenswrapper[4840]: E1209 18:34:41.610998 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="9ec426c3-8fdd-42d9-9ea5-5d751112ee04"
Dec 09 18:34:42 crc kubenswrapper[4840]: E1209 18:34:42.614411 4840 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-trhsb" podUID="5124c5e9-268a-473a-abe6-b5d1af073124"
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515116065712024451 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000017500000000000015116065713017367 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015116051750016506 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015116051751015457 5ustar corecore